1 #ifndef _LINUX_SCHED_H
2 #define _LINUX_SCHED_H
3 
4 /*
5  * cloning flags:
6  */
7 #define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
8 #define CLONE_VM	0x00000100	/* set if VM shared between processes */
9 #define CLONE_FS	0x00000200	/* set if fs info shared between processes */
10 #define CLONE_FILES	0x00000400	/* set if open files shared between processes */
11 #define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
12 #define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
13 #define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
14 #define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
15 #define CLONE_THREAD	0x00010000	/* Same thread group? */
16 #define CLONE_NEWNS	0x00020000	/* New namespace group? */
17 #define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
18 #define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
19 #define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
20 #define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
21 #define CLONE_DETACHED		0x00400000	/* Unused, ignored */
22 #define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
23 #define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
24 /* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
25    and is now available for re-use. */
26 #define CLONE_NEWUTS		0x04000000	/* New utsname group? */
27 #define CLONE_NEWIPC		0x08000000	/* New ipcs */
28 #define CLONE_NEWUSER		0x10000000	/* New user namespace */
29 #define CLONE_NEWPID		0x20000000	/* New pid namespace */
30 #define CLONE_NEWNET		0x40000000	/* New network namespace */
31 #define CLONE_IO		0x80000000	/* Clone io context */
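
/*
 * Illustrative sketch (not part of the original header): a thread-style
 * clone, roughly the flag combination an NPTL pthread_create() passes to
 * clone(2); the exact set is an example, not a requirement:
 *
 *	flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 *		CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
 *		CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
 */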
32 
33 /*
34  * Scheduling policies
35  */
36 #define SCHED_NORMAL		0
37 #define SCHED_FIFO		1
38 #define SCHED_RR		2
39 #define SCHED_BATCH		3
40 /* SCHED_ISO: reserved but not implemented yet */
41 #define SCHED_IDLE		5
42 /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
43 #define SCHED_RESET_ON_FORK     0x40000000
44 
45 #ifdef __KERNEL__
46 
47 struct sched_param {
48 	int sched_priority;
49 };
50 
51 #include <asm/param.h>	/* for HZ */
52 
53 #include <linux/capability.h>
54 #include <linux/threads.h>
55 #include <linux/kernel.h>
56 #include <linux/types.h>
57 #include <linux/timex.h>
58 #include <linux/jiffies.h>
59 #include <linux/rbtree.h>
60 #include <linux/thread_info.h>
61 #include <linux/cpumask.h>
62 #include <linux/errno.h>
63 #include <linux/nodemask.h>
64 #include <linux/mm_types.h>
65 
66 #include <asm/page.h>
67 #include <asm/ptrace.h>
68 #include <asm/cputime.h>
69 
70 #include <linux/smp.h>
71 #include <linux/sem.h>
72 #include <linux/signal.h>
73 #include <linux/compiler.h>
74 #include <linux/completion.h>
75 #include <linux/pid.h>
76 #include <linux/percpu.h>
77 #include <linux/topology.h>
78 #include <linux/proportions.h>
79 #include <linux/seccomp.h>
80 #include <linux/rcupdate.h>
81 #include <linux/rculist.h>
82 #include <linux/rtmutex.h>
83 
84 #include <linux/time.h>
85 #include <linux/param.h>
86 #include <linux/resource.h>
87 #include <linux/timer.h>
88 #include <linux/hrtimer.h>
89 #include <linux/task_io_accounting.h>
90 #include <linux/latencytop.h>
91 #include <linux/cred.h>
92 #include <linux/llist.h>
93 
94 #include <asm/processor.h>
95 
96 struct exec_domain;
97 struct futex_pi_state;
98 struct robust_list_head;
99 struct bio_list;
100 struct fs_struct;
101 struct perf_event_context;
102 struct blk_plug;
103 
104 /*
105  * List of flags we want to share for kernel threads,
106  * if only because they are not used by them anyway.
107  */
108 #define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
109 
110 /*
111  * These are the constants used to fake the fixed-point load-average
112  * counting. Some notes:
113  *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
114  *    a load-average precision of 10 bits integer + 11 bits fractional
115  *  - if you want to count load-averages more often, you need more
116  *    precision, or rounding will get you. With 2-second counting freq,
117  *    the EXP_n values would be 1981, 2034 and 2043 if still using only
118  *    11 bit fractions.
119  */
120 extern unsigned long avenrun[];		/* Load averages */
121 extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
122 
123 #define FSHIFT		11		/* nr of bits of precision */
124 #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
125 #define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
126 #define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
127 #define EXP_5		2014		/* 1/exp(5sec/5min) */
128 #define EXP_15		2037		/* 1/exp(5sec/15min) */
129 
130 #define CALC_LOAD(load,exp,n) \
131 	load *= exp; \
132 	load += n*(FIXED_1-exp); \
133 	load >>= FSHIFT;
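
/*
 * Illustrative sketch (not part of the original header): roughly how the
 * scheduler core applies CALC_LOAD once every LOAD_FREQ ticks; "active"
 * is assumed to be the sampled count of runnable plus uninterruptible
 * tasks, already scaled by FIXED_1:
 *
 *	CALC_LOAD(avenrun[0], EXP_1, active);	 1-minute average
 *	CALC_LOAD(avenrun[1], EXP_5, active);	 5-minute average
 *	CALC_LOAD(avenrun[2], EXP_15, active);	 15-minute average
 */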
134 
135 extern unsigned long total_forks;
136 extern int nr_threads;
137 DECLARE_PER_CPU(unsigned long, process_counts);
138 extern int nr_processes(void);
139 extern unsigned long nr_running(void);
140 extern unsigned long nr_uninterruptible(void);
141 extern unsigned long nr_iowait(void);
142 extern unsigned long nr_iowait_cpu(int cpu);
143 extern unsigned long this_cpu_load(void);
144 
145 
146 extern void calc_global_load(unsigned long ticks);
147 
148 extern unsigned long get_parent_ip(unsigned long addr);
149 
150 struct seq_file;
151 struct cfs_rq;
152 struct task_group;
153 #ifdef CONFIG_SCHED_DEBUG
154 extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
155 extern void proc_sched_set_task(struct task_struct *p);
156 extern void
157 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
158 #else
159 static inline void
160 proc_sched_show_task(struct task_struct *p, struct seq_file *m)
161 {
162 }
163 static inline void proc_sched_set_task(struct task_struct *p)
164 {
165 }
166 static inline void
167 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
168 {
169 }
170 #endif
171 
172 /*
173  * Task state bitmask. NOTE! These bits are also
174  * encoded in fs/proc/array.c: get_task_state().
175  *
176  * We have two separate sets of flags: task->state
177  * is about runnability, while task->exit_state are
178  * about the task exiting. Confusing, but this way
179  * modifying one set can't modify the other one by
180  * mistake.
181  */
182 #define TASK_RUNNING		0
183 #define TASK_INTERRUPTIBLE	1
184 #define TASK_UNINTERRUPTIBLE	2
185 #define __TASK_STOPPED		4
186 #define __TASK_TRACED		8
187 /* in tsk->exit_state */
188 #define EXIT_ZOMBIE		16
189 #define EXIT_DEAD		32
190 /* in tsk->state again */
191 #define TASK_DEAD		64
192 #define TASK_WAKEKILL		128
193 #define TASK_WAKING		256
194 #define TASK_STATE_MAX		512
195 
196 #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
197 
198 extern char ___assert_task_state[1 - 2*!!(
199 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
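
/*
 * Editorial note: the build-time assert above only checks that the two
 * stay in sync. TASK_STATE_TO_CHAR_STR has one character per state bit,
 * so sizeof("RSDTtZXxKW") - 1 = 10 must equal ilog2(TASK_STATE_MAX) + 1
 * = ilog2(512) + 1 = 10; adding a state bit without a character (or the
 * other way round) gives the array a negative size and breaks the build.
 */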
200 
201 /* Convenience macros for the sake of set_task_state */
202 #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
203 #define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
204 #define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
205 
206 /* Convenience macros for the sake of wake_up */
207 #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
208 #define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
209 
210 /* get_task_state() */
211 #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
212 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
213 				 __TASK_TRACED)
214 
215 #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
216 #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
217 #define task_is_dead(task)	((task)->exit_state != 0)
218 #define task_is_stopped_or_traced(task)	\
219 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
220 #define task_contributes_to_load(task)	\
221 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
222 				 (task->flags & PF_FROZEN) == 0)
223 
224 #define __set_task_state(tsk, state_value)		\
225 	do { (tsk)->state = (state_value); } while (0)
226 #define set_task_state(tsk, state_value)		\
227 	set_mb((tsk)->state, (state_value))
228 
229 /*
230  * set_current_state() includes a barrier so that the write of current->state
231  * is correctly serialised wrt the caller's subsequent test of whether to
232  * actually sleep:
233  *
234  *	set_current_state(TASK_UNINTERRUPTIBLE);
235  *	if (do_i_need_to_sleep())
236  *		schedule();
237  *
238  * If the caller does not need such serialisation then use __set_current_state()
239  */
240 #define __set_current_state(state_value)			\
241 	do { current->state = (state_value); } while (0)
242 #define set_current_state(state_value)		\
243 	set_mb(current->state, (state_value))
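
/*
 * Illustrative sketch (not part of the original header): the canonical
 * wait loop built on set_current_state(), assuming a caller-provided
 * "condition" and a waker that calls wake_up_process():
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */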
244 
245 /* Task command name length */
246 #define TASK_COMM_LEN 16
247 
248 #include <linux/spinlock.h>
249 
250 /*
251  * This serializes "schedule()" and also protects
252  * the run-queue from deletions/modifications (but
253  * _adding_ to the beginning of the run-queue has
254  * a separate lock).
255  */
256 extern rwlock_t tasklist_lock;
257 extern spinlock_t mmlist_lock;
258 
259 struct task_struct;
260 
261 #ifdef CONFIG_PROVE_RCU
262 extern int lockdep_tasklist_lock_is_held(void);
263 #endif /* #ifdef CONFIG_PROVE_RCU */
264 
265 extern void sched_init(void);
266 extern void sched_init_smp(void);
267 extern asmlinkage void schedule_tail(struct task_struct *prev);
268 extern void init_idle(struct task_struct *idle, int cpu);
269 extern void init_idle_bootup_task(struct task_struct *idle);
270 
271 extern int runqueue_is_locked(int cpu);
272 
273 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
274 extern void select_nohz_load_balancer(int stop_tick);
275 extern void set_cpu_sd_state_idle(void);
276 extern int get_nohz_timer_target(void);
277 #else
278 static inline void select_nohz_load_balancer(int stop_tick) { }
279 static inline void set_cpu_sd_state_idle(void) { }
280 #endif
281 
282 /*
283  * Only dump TASK_* tasks. (0 for all tasks)
284  */
285 extern void show_state_filter(unsigned long state_filter);
286 
287 static inline void show_state(void)
288 {
289 	show_state_filter(0);
290 }
291 
292 extern void show_regs(struct pt_regs *);
293 
294 /*
295  * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
296  * task), SP is the stack pointer of the first frame that should be shown in the back
297  * trace (or NULL if the entire call-chain of the task should be shown).
298  */
299 extern void show_stack(struct task_struct *task, unsigned long *sp);
300 
301 void io_schedule(void);
302 long io_schedule_timeout(long timeout);
303 
304 extern void cpu_init (void);
305 extern void trap_init(void);
306 extern void update_process_times(int user);
307 extern void scheduler_tick(void);
308 
309 extern void sched_show_task(struct task_struct *p);
310 
311 #ifdef CONFIG_LOCKUP_DETECTOR
312 extern void touch_softlockup_watchdog(void);
313 extern void touch_softlockup_watchdog_sync(void);
314 extern void touch_all_softlockup_watchdogs(void);
315 extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
316 				  void __user *buffer,
317 				  size_t *lenp, loff_t *ppos);
318 extern unsigned int  softlockup_panic;
319 void lockup_detector_init(void);
320 #else
321 static inline void touch_softlockup_watchdog(void)
322 {
323 }
324 static inline void touch_softlockup_watchdog_sync(void)
325 {
326 }
327 static inline void touch_all_softlockup_watchdogs(void)
328 {
329 }
330 static inline void lockup_detector_init(void)
331 {
332 }
333 #endif
334 
335 #ifdef CONFIG_DETECT_HUNG_TASK
336 extern unsigned int  sysctl_hung_task_panic;
337 extern unsigned long sysctl_hung_task_check_count;
338 extern unsigned long sysctl_hung_task_timeout_secs;
339 extern unsigned long sysctl_hung_task_warnings;
340 extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
341 					 void __user *buffer,
342 					 size_t *lenp, loff_t *ppos);
343 #else
344 /* Avoid need for ifdefs elsewhere in the code */
345 enum { sysctl_hung_task_timeout_secs = 0 };
346 #endif
347 
348 /* Attach to any functions which should be ignored in wchan output. */
349 #define __sched		__attribute__((__section__(".sched.text")))
350 
351 /* Linker adds these: start and end of __sched functions */
352 extern char __sched_text_start[], __sched_text_end[];
353 
354 /* Is this address in the __sched functions? */
355 extern int in_sched_functions(unsigned long addr);
356 
357 #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
358 extern signed long schedule_timeout(signed long timeout);
359 extern signed long schedule_timeout_interruptible(signed long timeout);
360 extern signed long schedule_timeout_killable(signed long timeout);
361 extern signed long schedule_timeout_uninterruptible(signed long timeout);
362 asmlinkage void schedule(void);
363 extern void schedule_preempt_disabled(void);
364 extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
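
/*
 * Illustrative sketch (not part of the original header): sleeping for
 * roughly one second with schedule_timeout(); "remaining" is just an
 * illustrative local. The task state must be set first; the
 * _interruptible/_killable/_uninterruptible variants do that internally.
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);	 timeout given in jiffies
 */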
365 
366 struct nsproxy;
367 struct user_namespace;
368 
369 /*
370  * Default maximum number of active map areas, this limits the number of vmas
371  * per mm struct. Users can overwrite this number by sysctl but there is a
372  * problem.
373  *
374  * When a program's coredump is generated in ELF format, a section is created
375  * per vma. In ELF, the number of sections is represented as an unsigned short,
376  * so the number of sections must stay below 65535 at coredump time.
377  * Because the kernel adds some informative sections to the program image when
378  * generating the coredump, we need some margin. The number of extra sections
379  * is currently 1-3, depending on the arch; we use 5 as a safe margin here.
380  */
381 #define MAPCOUNT_ELF_CORE_MARGIN	(5)
382 #define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
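
/*
 * Worked out (not in the original header): with USHRT_MAX == 65535 the
 * default comes to 65535 - 5 = 65530 vmas per mm, which is the initial
 * value of the vm.max_map_count sysctl.
 */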
383 
384 extern int sysctl_max_map_count;
385 
386 #include <linux/aio.h>
387 
388 #ifdef CONFIG_MMU
389 extern void arch_pick_mmap_layout(struct mm_struct *mm);
390 extern unsigned long
391 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
392 		       unsigned long, unsigned long);
393 extern unsigned long
394 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
395 			  unsigned long len, unsigned long pgoff,
396 			  unsigned long flags);
397 extern void arch_unmap_area(struct mm_struct *, unsigned long);
398 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
399 #else
400 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
401 #endif
402 
403 
404 extern void set_dumpable(struct mm_struct *mm, int value);
405 extern int get_dumpable(struct mm_struct *mm);
406 
407 /* mm flags */
408 /* dumpable bits */
409 #define MMF_DUMPABLE      0  /* core dump is permitted */
410 #define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
411 
412 #define MMF_DUMPABLE_BITS 2
413 #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
414 
415 /* coredump filter bits */
416 #define MMF_DUMP_ANON_PRIVATE	2
417 #define MMF_DUMP_ANON_SHARED	3
418 #define MMF_DUMP_MAPPED_PRIVATE	4
419 #define MMF_DUMP_MAPPED_SHARED	5
420 #define MMF_DUMP_ELF_HEADERS	6
421 #define MMF_DUMP_HUGETLB_PRIVATE 7
422 #define MMF_DUMP_HUGETLB_SHARED  8
423 
424 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
425 #define MMF_DUMP_FILTER_BITS	7
426 #define MMF_DUMP_FILTER_MASK \
427 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
428 #define MMF_DUMP_FILTER_DEFAULT \
429 	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
430 	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
431 
432 #ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
433 # define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
434 #else
435 # define MMF_DUMP_MASK_DEFAULT_ELF	0
436 #endif
437 					/* leave room for more dump flags */
438 #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
439 #define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
440 
441 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
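
/*
 * Editorial note: the filter bits are what /proc/<pid>/coredump_filter
 * exposes, shifted down by MMF_DUMP_FILTER_SHIFT. With the defaults
 * above the file reads 0x23 (anon private + anon shared + hugetlb
 * private), or 0x33 when CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS adds the
 * ELF-headers bit.
 */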
442 
443 struct sighand_struct {
444 	atomic_t		count;
445 	struct k_sigaction	action[_NSIG];
446 	spinlock_t		siglock;
447 	wait_queue_head_t	signalfd_wqh;
448 };
449 
450 struct pacct_struct {
451 	int			ac_flag;
452 	long			ac_exitcode;
453 	unsigned long		ac_mem;
454 	cputime_t		ac_utime, ac_stime;
455 	unsigned long		ac_minflt, ac_majflt;
456 };
457 
458 struct cpu_itimer {
459 	cputime_t expires;
460 	cputime_t incr;
461 	u32 error;
462 	u32 incr_error;
463 };
464 
465 /**
466  * struct task_cputime - collected CPU time counts
467  * @utime:		time spent in user mode, in &cputime_t units
468  * @stime:		time spent in kernel mode, in &cputime_t units
469  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
470  *
471  * This structure groups together three kinds of CPU time that are
472  * tracked for threads and thread groups.  Most things considering
473  * CPU time want to group these counts together and treat all three
474  * of them in parallel.
475  */
476 struct task_cputime {
477 	cputime_t utime;
478 	cputime_t stime;
479 	unsigned long long sum_exec_runtime;
480 };
481 /* Alternate field names when used to cache expirations. */
482 #define prof_exp	stime
483 #define virt_exp	utime
484 #define sched_exp	sum_exec_runtime
485 
486 #define INIT_CPUTIME	\
487 	(struct task_cputime) {					\
488 		.utime = 0,					\
489 		.stime = 0,					\
490 		.sum_exec_runtime = 0,				\
491 	}
492 
493 /*
494  * Disable preemption until the scheduler is running.
495  * Reset by start_kernel()->sched_init()->init_idle().
496  *
497  * We include PREEMPT_ACTIVE to avoid cond_resched() from working
498  * before the scheduler is active -- see should_resched().
499  */
500 #define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)
501 
502 /**
503  * struct thread_group_cputimer - thread group interval timer counts
504  * @cputime:		thread group interval timers.
505  * @running:		non-zero when there are timers running and
506  * 			@cputime receives updates.
507  * @lock:		lock for fields in this struct.
508  *
509  * This structure contains the version of task_cputime, above, that is
510  * used for thread group CPU timer calculations.
511  */
512 struct thread_group_cputimer {
513 	struct task_cputime cputime;
514 	int running;
515 	raw_spinlock_t lock;
516 };
517 
518 #include <linux/rwsem.h>
519 struct autogroup;
520 
521 /*
522  * NOTE! "signal_struct" does not have its own
523  * locking, because a shared signal_struct always
524  * implies a shared sighand_struct, so locking
525  * sighand_struct is always a proper superset of
526  * the locking of signal_struct.
527  */
528 struct signal_struct {
529 	atomic_t		sigcnt;
530 	atomic_t		live;
531 	int			nr_threads;
532 	struct list_head	thread_head;
533 
534 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
535 
536 	/* current thread group signal load-balancing target: */
537 	struct task_struct	*curr_target;
538 
539 	/* shared signal handling: */
540 	struct sigpending	shared_pending;
541 
542 	/* thread group exit support */
543 	int			group_exit_code;
544 	/* overloaded:
545 	 * - notify group_exit_task when ->count is equal to notify_count
546 	 * - everyone except group_exit_task is stopped during signal delivery
547 	 *   of fatal signals, group_exit_task processes the signal.
548 	 */
549 	int			notify_count;
550 	struct task_struct	*group_exit_task;
551 
552 	/* thread group stop support, overloads group_exit_code too */
553 	int			group_stop_count;
554 	unsigned int		flags; /* see SIGNAL_* flags below */
555 
556 	/*
557 	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
558 	 * manager, to re-parent orphan (double-forking) child processes
559 	 * to this process instead of 'init'. The service manager is
560 	 * able to receive SIGCHLD signals and is able to investigate
561 	 * the process until it calls wait(). All children of this
562 	 * process will inherit a flag if they should look for a
563 	 * child_subreaper process at exit.
564 	 */
565 	unsigned int		is_child_subreaper:1;
566 	unsigned int		has_child_subreaper:1;
567 
568 	/* POSIX.1b Interval Timers */
569 	struct list_head posix_timers;
570 
571 	/* ITIMER_REAL timer for the process */
572 	struct hrtimer real_timer;
573 	struct pid *leader_pid;
574 	ktime_t it_real_incr;
575 
576 	/*
577 	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
578 	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
579 	 * values are defined to 0 and 1 respectively
580 	 */
581 	struct cpu_itimer it[2];
582 
583 	/*
584 	 * Thread group totals for process CPU timers.
585 	 * See thread_group_cputimer(), et al, for details.
586 	 */
587 	struct thread_group_cputimer cputimer;
588 
589 	/* Earliest-expiration cache. */
590 	struct task_cputime cputime_expires;
591 
592 	struct list_head cpu_timers[3];
593 
594 	struct pid *tty_old_pgrp;
595 
596 	/* boolean value for session group leader */
597 	int leader;
598 
599 	struct tty_struct *tty; /* NULL if no tty */
600 
601 #ifdef CONFIG_SCHED_AUTOGROUP
602 	struct autogroup *autogroup;
603 #endif
604 	/*
605 	 * Cumulative resource counters for dead threads in the group,
606 	 * and for reaped dead child processes forked by this group.
607 	 * Live threads maintain their own counters and add to these
608 	 * in __exit_signal, except for the group leader.
609 	 */
610 	cputime_t utime, stime, cutime, cstime;
611 	cputime_t gtime;
612 	cputime_t cgtime;
613 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
614 	cputime_t prev_utime, prev_stime;
615 #endif
616 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
617 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
618 	unsigned long inblock, oublock, cinblock, coublock;
619 	unsigned long maxrss, cmaxrss;
620 	struct task_io_accounting ioac;
621 
622 	/*
623 	 * Cumulative ns of scheduled CPU time for dead threads in the
624 	 * group, not including a zombie group leader. (This only differs
625 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
626 	 * other than jiffies.)
627 	 */
628 	unsigned long long sum_sched_runtime;
629 
630 	/*
631 	 * We don't bother to synchronize most readers of this at all,
632 	 * because there is no reader checking a limit that actually needs
633 	 * to get both rlim_cur and rlim_max atomically, and either one
634 	 * alone is a single word that can safely be read normally.
635 	 * getrlimit/setrlimit use task_lock(current->group_leader) to
636 	 * protect this instead of the siglock, because they really
637 	 * have no need to disable irqs.
638 	 */
639 	struct rlimit rlim[RLIM_NLIMITS];
640 
641 #ifdef CONFIG_BSD_PROCESS_ACCT
642 	struct pacct_struct pacct;	/* per-process accounting information */
643 #endif
644 #ifdef CONFIG_TASKSTATS
645 	struct taskstats *stats;
646 #endif
647 #ifdef CONFIG_AUDIT
648 	unsigned audit_tty;
649 	struct tty_audit_buf *tty_audit_buf;
650 #endif
651 #ifdef CONFIG_CGROUPS
652 	/*
653 	 * group_rwsem prevents new tasks from entering the threadgroup and
654 	 * member tasks from exiting, more specifically, setting of
655 	 * PF_EXITING.  fork and exit paths are protected with this rwsem
656 	 * using threadgroup_change_begin/end().  Users which require
657 	 * threadgroup to remain stable should use threadgroup_[un]lock()
658 	 * which also takes care of exec path.  Currently, cgroup is the
659 	 * only user.
660 	 */
661 	struct rw_semaphore group_rwsem;
662 #endif
663 
664 	int oom_adj;		/* OOM kill score adjustment (bit shift) */
665 	int oom_score_adj;	/* OOM kill score adjustment */
666 	int oom_score_adj_min;	/* OOM kill score adjustment minimum value.
667 				 * Only settable by CAP_SYS_RESOURCE. */
668 
669 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
670 					 * credential calculations
671 					 * (notably ptrace) */
672 };
673 
674 /* Context switch must be unlocked if interrupts are to be enabled */
675 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
676 # define __ARCH_WANT_UNLOCKED_CTXSW
677 #endif
678 
679 /*
680  * Bits in flags field of signal_struct.
681  */
682 #define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
683 #define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
684 #define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
685 /*
686  * Pending notifications to parent.
687  */
688 #define SIGNAL_CLD_STOPPED	0x00000010
689 #define SIGNAL_CLD_CONTINUED	0x00000020
690 #define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
691 
692 #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
693 
694 /* If true, all threads except ->group_exit_task have pending SIGKILL */
695 static inline int signal_group_exit(const struct signal_struct *sig)
696 {
697 	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
698 		(sig->group_exit_task != NULL);
699 }
700 
701 /*
702  * Some day this will be a full-fledged user tracking system..
703  */
704 struct user_struct {
705 	atomic_t __count;	/* reference count */
706 	atomic_t processes;	/* How many processes does this user have? */
707 	atomic_t files;		/* How many open files does this user have? */
708 	atomic_t sigpending;	/* How many pending signals does this user have? */
709 #ifdef CONFIG_INOTIFY_USER
710 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
711 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
712 #endif
713 #ifdef CONFIG_FANOTIFY
714 	atomic_t fanotify_listeners;
715 #endif
716 #ifdef CONFIG_EPOLL
717 	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
718 #endif
719 #ifdef CONFIG_POSIX_MQUEUE
720 	/* protected by mq_lock	*/
721 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
722 #endif
723 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
724 
725 #ifdef CONFIG_KEYS
726 	struct key *uid_keyring;	/* UID specific keyring */
727 	struct key *session_keyring;	/* UID's default session keyring */
728 #endif
729 
730 	/* Hash table maintenance information */
731 	struct hlist_node uidhash_node;
732 	uid_t uid;
733 	struct user_namespace *user_ns;
734 
735 #ifdef CONFIG_PERF_EVENTS
736 	atomic_long_t locked_vm;
737 #endif
738 };
739 
740 extern int uids_sysfs_init(void);
741 
742 extern struct user_struct *find_user(uid_t);
743 
744 extern struct user_struct root_user;
745 #define INIT_USER (&root_user)
746 
747 
748 struct backing_dev_info;
749 struct reclaim_state;
750 
751 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
752 struct sched_info {
753 	/* cumulative counters */
754 	unsigned long pcount;	      /* # of times run on this cpu */
755 	unsigned long long run_delay; /* time spent waiting on a runqueue */
756 
757 	/* timestamps */
758 	unsigned long long last_arrival,/* when we last ran on a cpu */
759 			   last_queued;	/* when we were last queued to run */
760 };
761 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
762 
763 #ifdef CONFIG_TASK_DELAY_ACCT
764 struct task_delay_info {
765 	spinlock_t	lock;
766 	unsigned int	flags;	/* Private per-task flags */
767 
768 	/* For each stat XXX, add following, aligned appropriately
769 	 *
770 	 * struct timespec XXX_start, XXX_end;
771 	 * u64 XXX_delay;
772 	 * u32 XXX_count;
773 	 *
774 	 * Atomicity of updates to XXX_delay, XXX_count protected by
775 	 * single lock above (split into XXX_lock if contention is an issue).
776 	 */
777 
778 	/*
779 	 * XXX_count is incremented on every XXX operation, the delay
780 	 * associated with the operation is added to XXX_delay.
781 	 * XXX_delay contains the accumulated delay time in nanoseconds.
782 	 */
783 	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
784 	u64 blkio_delay;	/* wait for sync block io completion */
785 	u64 swapin_delay;	/* wait for swapin block io completion */
786 	u32 blkio_count;	/* total count of the number of sync block */
787 				/* io operations performed */
788 	u32 swapin_count;	/* total count of the number of swapin block */
789 				/* io operations performed */
790 
791 	struct timespec freepages_start, freepages_end;
792 	u64 freepages_delay;	/* wait for memory reclaim */
793 	u32 freepages_count;	/* total count of memory reclaim */
794 };
795 #endif	/* CONFIG_TASK_DELAY_ACCT */
796 
797 static inline int sched_info_on(void)
798 {
799 #ifdef CONFIG_SCHEDSTATS
800 	return 1;
801 #elif defined(CONFIG_TASK_DELAY_ACCT)
802 	extern int delayacct_on;
803 	return delayacct_on;
804 #else
805 	return 0;
806 #endif
807 }
808 
809 enum cpu_idle_type {
810 	CPU_IDLE,
811 	CPU_NOT_IDLE,
812 	CPU_NEWLY_IDLE,
813 	CPU_MAX_IDLE_TYPES
814 };
815 
816 /*
817  * Increase resolution of nice-level calculations for 64-bit architectures.
818  * The extra resolution improves shares distribution and load balancing of
819  * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
820  * hierarchies, especially on larger systems. This is not a user-visible change
821  * and does not change the user-interface for setting shares/weights.
822  *
823  * We increase resolution only if we have enough bits to allow this increased
824  * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
825  * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
826  * increased costs.
827  */
828 #if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load  */
829 # define SCHED_LOAD_RESOLUTION	10
830 # define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
831 # define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
832 #else
833 # define SCHED_LOAD_RESOLUTION	0
834 # define scale_load(w)		(w)
835 # define scale_load_down(w)	(w)
836 #endif
837 
838 #define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
839 #define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
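
/*
 * Worked example (not in the original header): with the extra resolution
 * enabled, SCHED_LOAD_RESOLUTION is 10, so a nice-0 weight of 1024 is
 * stored as scale_load(1024) = 1024 << 10 = 1048576 and SCHED_LOAD_SCALE
 * becomes 1 << 20; with it disabled (the current default above), weights
 * are kept as-is and SCHED_LOAD_SCALE is 1 << 10 = 1024.
 */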
840 
841 /*
842  * Increase resolution of cpu_power calculations
843  */
844 #define SCHED_POWER_SHIFT	10
845 #define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)
846 
847 /*
848  * sched-domains (multiprocessor balancing) declarations:
849  */
850 #ifdef CONFIG_SMP
851 #define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
852 #define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
853 #define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
854 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
855 #define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
856 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
857 #define SD_PREFER_LOCAL		0x0040  /* Prefer to keep tasks local to this domain */
858 #define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
859 #define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
860 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
861 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
862 #define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
863 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
864 #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
865 
866 enum powersavings_balance_level {
867 	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
868 	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
869 					 * first for long running threads
870 					 */
871 	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
872 					 * cpu package for power savings
873 					 */
874 	MAX_POWERSAVINGS_BALANCE_LEVELS
875 };
876 
877 extern int sched_mc_power_savings, sched_smt_power_savings;
878 
879 static inline int sd_balance_for_mc_power(void)
880 {
881 	if (sched_smt_power_savings)
882 		return SD_POWERSAVINGS_BALANCE;
883 
884 	if (!sched_mc_power_savings)
885 		return SD_PREFER_SIBLING;
886 
887 	return 0;
888 }
889 
890 static inline int sd_balance_for_package_power(void)
891 {
892 	if (sched_mc_power_savings | sched_smt_power_savings)
893 		return SD_POWERSAVINGS_BALANCE;
894 
895 	return SD_PREFER_SIBLING;
896 }
897 
898 extern int __weak arch_sd_sibiling_asym_packing(void);
899 
900 /*
901  * Optimise SD flags for power savings:
902  * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
903  * Keep default SD flags if sched_{smt,mc}_power_saving=0
904  */
905 
906 static inline int sd_power_saving_flags(void)
907 {
908 	if (sched_mc_power_savings | sched_smt_power_savings)
909 		return SD_BALANCE_NEWIDLE;
910 
911 	return 0;
912 }
913 
914 struct sched_group_power {
915 	atomic_t ref;
916 	/*
917 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
918 	 * single CPU.
919 	 */
920 	unsigned int power, power_orig;
921 	unsigned long next_update;
922 	/*
923 	 * Number of busy cpus in this group.
924 	 */
925 	atomic_t nr_busy_cpus;
926 };
927 
928 struct sched_group {
929 	struct sched_group *next;	/* Must be a circular list */
930 	atomic_t ref;
931 
932 	unsigned int group_weight;
933 	struct sched_group_power *sgp;
934 
935 	/*
936 	 * The CPUs this group covers.
937 	 *
938 	 * NOTE: this field is variable length. (Allocated dynamically
939 	 * by attaching extra space to the end of the structure,
940 	 * depending on how many CPUs the kernel has booted up with)
941 	 */
942 	unsigned long cpumask[0];
943 };
944 
945 static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
946 {
947 	return to_cpumask(sg->cpumask);
948 }
949 
950 /**
951  * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
952  * @group: The group whose first cpu is to be returned.
953  */
954 static inline unsigned int group_first_cpu(struct sched_group *group)
955 {
956 	return cpumask_first(sched_group_cpus(group));
957 }
958 
959 struct sched_domain_attr {
960 	int relax_domain_level;
961 };
962 
963 #define SD_ATTR_INIT	(struct sched_domain_attr) {	\
964 	.relax_domain_level = -1,			\
965 }
966 
967 extern int sched_domain_level_max;
968 
969 struct sched_domain {
970 	/* These fields must be setup */
971 	struct sched_domain *parent;	/* top domain must be null terminated */
972 	struct sched_domain *child;	/* bottom domain must be null terminated */
973 	struct sched_group *groups;	/* the balancing groups of the domain */
974 	unsigned long min_interval;	/* Minimum balance interval ms */
975 	unsigned long max_interval;	/* Maximum balance interval ms */
976 	unsigned int busy_factor;	/* less balancing by factor if busy */
977 	unsigned int imbalance_pct;	/* No balance until over watermark */
978 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
979 	unsigned int busy_idx;
980 	unsigned int idle_idx;
981 	unsigned int newidle_idx;
982 	unsigned int wake_idx;
983 	unsigned int forkexec_idx;
984 	unsigned int smt_gain;
985 	int flags;			/* See SD_* */
986 	int level;
987 
988 	/* Runtime fields. */
989 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
990 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
991 	unsigned int nr_balance_failed; /* initialise to 0 */
992 
993 	u64 last_update;
994 
995 #ifdef CONFIG_SCHEDSTATS
996 	/* load_balance() stats */
997 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
998 	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
999 	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
1000 	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
1001 	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
1002 	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
1003 	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
1004 	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
1005 
1006 	/* Active load balancing */
1007 	unsigned int alb_count;
1008 	unsigned int alb_failed;
1009 	unsigned int alb_pushed;
1010 
1011 	/* SD_BALANCE_EXEC stats */
1012 	unsigned int sbe_count;
1013 	unsigned int sbe_balanced;
1014 	unsigned int sbe_pushed;
1015 
1016 	/* SD_BALANCE_FORK stats */
1017 	unsigned int sbf_count;
1018 	unsigned int sbf_balanced;
1019 	unsigned int sbf_pushed;
1020 
1021 	/* try_to_wake_up() stats */
1022 	unsigned int ttwu_wake_remote;
1023 	unsigned int ttwu_move_affine;
1024 	unsigned int ttwu_move_balance;
1025 #endif
1026 #ifdef CONFIG_SCHED_DEBUG
1027 	char *name;
1028 #endif
1029 	union {
1030 		void *private;		/* used during construction */
1031 		struct rcu_head rcu;	/* used during destruction */
1032 	};
1033 
1034 	unsigned int span_weight;
1035 	/*
1036 	 * Span of all CPUs in this domain.
1037 	 *
1038 	 * NOTE: this field is variable length. (Allocated dynamically
1039 	 * by attaching extra space to the end of the structure,
1040 	 * depending on how many CPUs the kernel has booted up with)
1041 	 */
1042 	unsigned long span[0];
1043 };
1044 
1045 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1046 {
1047 	return to_cpumask(sd->span);
1048 }
1049 
1050 extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1051 				    struct sched_domain_attr *dattr_new);
1052 
1053 /* Allocate an array of sched domains, for partition_sched_domains(). */
1054 cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1055 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
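
/*
 * Illustrative sketch (not part of the original header) of the calling
 * pattern, roughly what the cpuset code does when it rebuilds domains.
 * The scheduler keeps the array that is passed in, so the caller does
 * not free it afterwards:
 *
 *	cpumask_var_t *doms = alloc_sched_domains(1);
 *	cpumask_copy(doms[0], cpu_active_mask);
 *	partition_sched_domains(1, doms, NULL);
 */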
1056 
1057 /* Test a flag in parent sched domain */
1058 static inline int test_sd_parent(struct sched_domain *sd, int flag)
1059 {
1060 	if (sd->parent && (sd->parent->flags & flag))
1061 		return 1;
1062 
1063 	return 0;
1064 }
1065 
1066 unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
1067 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
1068 
1069 bool cpus_share_cache(int this_cpu, int that_cpu);
1070 
1071 #else /* CONFIG_SMP */
1072 
1073 struct sched_domain_attr;
1074 
1075 static inline void
1076 partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1077 			struct sched_domain_attr *dattr_new)
1078 {
1079 }
1080 
1081 static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1082 {
1083 	return true;
1084 }
1085 
1086 #endif	/* !CONFIG_SMP */
1087 
1088 
1089 struct io_context;			/* See blkdev.h */
1090 
1091 
1092 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1093 extern void prefetch_stack(struct task_struct *t);
1094 #else
1095 static inline void prefetch_stack(struct task_struct *t) { }
1096 #endif
1097 
1098 struct audit_context;		/* See audit.c */
1099 struct mempolicy;
1100 struct pipe_inode_info;
1101 struct uts_namespace;
1102 
1103 struct rq;
1104 struct sched_domain;
1105 
1106 /*
1107  * wake flags
1108  */
1109 #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
1110 #define WF_FORK		0x02		/* child wakeup after fork */
1111 #define WF_MIGRATED	0x04		/* internal use, task got migrated */
1112 
1113 #define ENQUEUE_WAKEUP		1
1114 #define ENQUEUE_HEAD		2
1115 #ifdef CONFIG_SMP
1116 #define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
1117 #else
1118 #define ENQUEUE_WAKING		0
1119 #endif
1120 
1121 #define DEQUEUE_SLEEP		1
1122 
1123 struct sched_class {
1124 	const struct sched_class *next;
1125 
1126 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1127 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1128 	void (*yield_task) (struct rq *rq);
1129 	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
1130 
1131 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1132 
1133 	struct task_struct * (*pick_next_task) (struct rq *rq);
1134 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1135 
1136 #ifdef CONFIG_SMP
1137 	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
1138 
1139 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
1140 	void (*post_schedule) (struct rq *this_rq);
1141 	void (*task_waking) (struct task_struct *task);
1142 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1143 
1144 	void (*set_cpus_allowed)(struct task_struct *p,
1145 				 const struct cpumask *newmask);
1146 
1147 	void (*rq_online)(struct rq *rq);
1148 	void (*rq_offline)(struct rq *rq);
1149 #endif
1150 
1151 	void (*set_curr_task) (struct rq *rq);
1152 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1153 	void (*task_fork) (struct task_struct *p);
1154 
1155 	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1156 	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1157 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1158 			     int oldprio);
1159 
1160 	unsigned int (*get_rr_interval) (struct rq *rq,
1161 					 struct task_struct *task);
1162 
1163 #ifdef CONFIG_FAIR_GROUP_SCHED
1164 	void (*task_move_group) (struct task_struct *p, int on_rq);
1165 #endif
1166 };
1167 
1168 struct load_weight {
1169 	unsigned long weight, inv_weight;
1170 };
1171 
1172 #ifdef CONFIG_SCHEDSTATS
1173 struct sched_statistics {
1174 	u64			wait_start;
1175 	u64			wait_max;
1176 	u64			wait_count;
1177 	u64			wait_sum;
1178 	u64			iowait_count;
1179 	u64			iowait_sum;
1180 
1181 	u64			sleep_start;
1182 	u64			sleep_max;
1183 	s64			sum_sleep_runtime;
1184 
1185 	u64			block_start;
1186 	u64			block_max;
1187 	u64			exec_max;
1188 	u64			slice_max;
1189 
1190 	u64			nr_migrations_cold;
1191 	u64			nr_failed_migrations_affine;
1192 	u64			nr_failed_migrations_running;
1193 	u64			nr_failed_migrations_hot;
1194 	u64			nr_forced_migrations;
1195 
1196 	u64			nr_wakeups;
1197 	u64			nr_wakeups_sync;
1198 	u64			nr_wakeups_migrate;
1199 	u64			nr_wakeups_local;
1200 	u64			nr_wakeups_remote;
1201 	u64			nr_wakeups_affine;
1202 	u64			nr_wakeups_affine_attempts;
1203 	u64			nr_wakeups_passive;
1204 	u64			nr_wakeups_idle;
1205 };
1206 #endif
1207 
1208 struct sched_entity {
1209 	struct load_weight	load;		/* for load-balancing */
1210 	struct rb_node		run_node;
1211 	struct list_head	group_node;
1212 	unsigned int		on_rq;
1213 
1214 	u64			exec_start;
1215 	u64			sum_exec_runtime;
1216 	u64			vruntime;
1217 	u64			prev_sum_exec_runtime;
1218 
1219 	u64			nr_migrations;
1220 
1221 #ifdef CONFIG_SCHEDSTATS
1222 	struct sched_statistics statistics;
1223 #endif
1224 
1225 #ifdef CONFIG_FAIR_GROUP_SCHED
1226 	struct sched_entity	*parent;
1227 	/* rq on which this entity is (to be) queued: */
1228 	struct cfs_rq		*cfs_rq;
1229 	/* rq "owned" by this entity/group: */
1230 	struct cfs_rq		*my_q;
1231 #endif
1232 };
1233 
1234 struct sched_rt_entity {
1235 	struct list_head run_list;
1236 	unsigned long timeout;
1237 	unsigned int time_slice;
1238 	int nr_cpus_allowed;
1239 
1240 	struct sched_rt_entity *back;
1241 #ifdef CONFIG_RT_GROUP_SCHED
1242 	struct sched_rt_entity	*parent;
1243 	/* rq on which this entity is (to be) queued: */
1244 	struct rt_rq		*rt_rq;
1245 	/* rq "owned" by this entity/group: */
1246 	struct rt_rq		*my_q;
1247 #endif
1248 };
1249 
1250 /*
1251  * default timeslice is 100 msecs (used only for SCHED_RR tasks).
1252  * Timeslices get refilled after they expire.
1253  */
1254 #define RR_TIMESLICE		(100 * HZ / 1000)
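
/*
 * Worked example (not in the original header): RR_TIMESLICE is expressed
 * in jiffies, so it is 100 ticks at HZ=1000, 25 ticks at HZ=250 and 10
 * ticks at HZ=100 -- all the same 100 ms round-robin slice.
 */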
1255 
1256 struct rcu_node;
1257 
1258 enum perf_event_task_context {
1259 	perf_invalid_context = -1,
1260 	perf_hw_context = 0,
1261 	perf_sw_context,
1262 	perf_nr_task_contexts,
1263 };
1264 
1265 struct task_struct {
1266 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1267 	void *stack;
1268 	atomic_t usage;
1269 	unsigned int flags;	/* per process flags, defined below */
1270 	unsigned int ptrace;
1271 
1272 #ifdef CONFIG_SMP
1273 	struct llist_node wake_entry;
1274 	int on_cpu;
1275 #endif
1276 	int on_rq;
1277 
1278 	int prio, static_prio, normal_prio;
1279 	unsigned int rt_priority;
1280 	const struct sched_class *sched_class;
1281 	struct sched_entity se;
1282 	struct sched_rt_entity rt;
1283 #ifdef CONFIG_CGROUP_SCHED
1284 	struct task_group *sched_task_group;
1285 #endif
1286 
1287 #ifdef CONFIG_PREEMPT_NOTIFIERS
1288 	/* list of struct preempt_notifier: */
1289 	struct hlist_head preempt_notifiers;
1290 #endif
1291 
1292 	/*
1293 	 * fpu_counter contains the number of consecutive context switches
1294 	 * during which the FPU is used. If this exceeds a threshold, the lazy
1295 	 * FPU saving becomes unlazy to save the trap. This is an unsigned char
1296 	 * so that after 256 times the counter wraps and the behavior turns
1297 	 * lazy again; this deals with bursty apps that only use the FPU for
1298 	 * a short time.
1299 	 */
1300 	unsigned char fpu_counter;
1301 #ifdef CONFIG_BLK_DEV_IO_TRACE
1302 	unsigned int btrace_seq;
1303 #endif
1304 
1305 	unsigned int policy;
1306 	cpumask_t cpus_allowed;
1307 
1308 #ifdef CONFIG_PREEMPT_RCU
1309 	int rcu_read_lock_nesting;
1310 	char rcu_read_unlock_special;
1311 	struct list_head rcu_node_entry;
1312 #endif /* #ifdef CONFIG_PREEMPT_RCU */
1313 #ifdef CONFIG_TREE_PREEMPT_RCU
1314 	struct rcu_node *rcu_blocked_node;
1315 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1316 #ifdef CONFIG_RCU_BOOST
1317 	struct rt_mutex *rcu_boost_mutex;
1318 #endif /* #ifdef CONFIG_RCU_BOOST */
1319 
1320 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1321 	struct sched_info sched_info;
1322 #endif
1323 
1324 	struct list_head tasks;
1325 #ifdef CONFIG_SMP
1326 	struct plist_node pushable_tasks;
1327 #endif
1328 
1329 	struct mm_struct *mm, *active_mm;
1330 #ifdef CONFIG_COMPAT_BRK
1331 	unsigned brk_randomized:1;
1332 #endif
1333 #if defined(SPLIT_RSS_COUNTING)
1334 	struct task_rss_stat	rss_stat;
1335 #endif
1336 /* task state */
1337 	int exit_state;
1338 	int exit_code, exit_signal;
1339 	int pdeath_signal;  /*  The signal sent when the parent dies  */
1340 	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
1341 	/* ??? */
1342 	unsigned int personality;
1343 	unsigned did_exec:1;
1344 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1345 				 * execve */
1346 	unsigned in_iowait:1;
1347 
1348 	/* Revert to default priority/policy when forking */
1349 	unsigned sched_reset_on_fork:1;
1350 	unsigned sched_contributes_to_load:1;
1351 
1352 #ifdef CONFIG_GENERIC_HARDIRQS
1353 	/* IRQ handler threads */
1354 	unsigned irq_thread:1;
1355 #endif
1356 
1357 	unsigned long atomic_flags; /* Flags needing atomic access. */
1358 
1359 	pid_t pid;
1360 	pid_t tgid;
1361 
1362 #ifdef CONFIG_CC_STACKPROTECTOR
1363 	/* Canary value for the -fstack-protector gcc feature */
1364 	unsigned long stack_canary;
1365 #endif
1366 
1367 	/*
1368 	 * pointers to (original) parent process, youngest child, younger sibling,
1369 	 * older sibling, respectively.  (p->father can be replaced with
1370 	 * p->real_parent->pid)
1371 	 */
1372 	struct task_struct __rcu *real_parent; /* real parent process */
1373 	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
1374 	/*
1375 	 * children/sibling forms the list of my natural children
1376 	 */
1377 	struct list_head children;	/* list of my children */
1378 	struct list_head sibling;	/* linkage in my parent's children list */
1379 	struct task_struct *group_leader;	/* threadgroup leader */
1380 
1381 	/*
1382 	 * ptraced is the list of tasks this task is using ptrace on.
1383 	 * This includes both natural children and PTRACE_ATTACH targets.
1384 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1385 	 */
1386 	struct list_head ptraced;
1387 	struct list_head ptrace_entry;
1388 
1389 	/* PID/PID hash table linkage. */
1390 	struct pid_link pids[PIDTYPE_MAX];
1391 	struct list_head thread_group;
1392 	struct list_head thread_node;
1393 
1394 	struct completion *vfork_done;		/* for vfork() */
1395 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
1396 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
1397 
1398 	cputime_t utime, stime, utimescaled, stimescaled;
1399 	cputime_t gtime;
1400 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
1401 	cputime_t prev_utime, prev_stime;
1402 #endif
1403 	unsigned long nvcsw, nivcsw; /* context switch counts */
1404 	struct timespec start_time; 		/* monotonic time */
1405 	struct timespec real_start_time;	/* boot based time */
1406 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1407 	unsigned long min_flt, maj_flt;
1408 
1409 	struct task_cputime cputime_expires;
1410 	struct list_head cpu_timers[3];
1411 
1412 /* process credentials */
1413 	const struct cred __rcu *real_cred; /* objective and real subjective task
1414 					 * credentials (COW) */
1415 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
1416 					 * credentials (COW) */
1417 	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
1418 
1419 	char comm[TASK_COMM_LEN]; /* executable name excluding path
1420 				     - access with [gs]et_task_comm (which lock
1421 				       it with task_lock())
1422 				     - initialized normally by setup_new_exec */
1423 /* file system info */
1424 	int link_count, total_link_count;
1425 #ifdef CONFIG_SYSVIPC
1426 /* ipc stuff */
1427 	struct sysv_sem sysvsem;
1428 #endif
1429 #ifdef CONFIG_DETECT_HUNG_TASK
1430 /* hung task detection */
1431 	unsigned long last_switch_count;
1432 #endif
1433 /* CPU-specific state of this task */
1434 	struct thread_struct thread;
1435 /* filesystem information */
1436 	struct fs_struct *fs;
1437 /* open file information */
1438 	struct files_struct *files;
1439 /* namespaces */
1440 	struct nsproxy *nsproxy;
1441 /* signal handlers */
1442 	struct signal_struct *signal;
1443 	struct sighand_struct *sighand;
1444 
1445 	sigset_t blocked, real_blocked;
1446 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
1447 	struct sigpending pending;
1448 
1449 	unsigned long sas_ss_sp;
1450 	size_t sas_ss_size;
1451 	int (*notifier)(void *priv);
1452 	void *notifier_data;
1453 	sigset_t *notifier_mask;
1454 	struct audit_context *audit_context;
1455 #ifdef CONFIG_AUDITSYSCALL
1456 	uid_t loginuid;
1457 	unsigned int sessionid;
1458 #endif
1459 	struct seccomp seccomp;
1460 
1461 /* Thread group tracking */
1462    	u32 parent_exec_id;
1463    	u32 self_exec_id;
1464 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1465  * mempolicy */
1466 	spinlock_t alloc_lock;
1467 
1468 	/* Protection of the PI data structures: */
1469 	raw_spinlock_t pi_lock;
1470 
1471 #ifdef CONFIG_RT_MUTEXES
1472 	/* PI waiters blocked on a rt_mutex held by this task */
1473 	struct plist_head pi_waiters;
1474 	/* Deadlock detection and priority inheritance handling */
1475 	struct rt_mutex_waiter *pi_blocked_on;
1476 #endif
1477 
1478 #ifdef CONFIG_DEBUG_MUTEXES
1479 	/* mutex deadlock detection */
1480 	struct mutex_waiter *blocked_on;
1481 #endif
1482 #ifdef CONFIG_TRACE_IRQFLAGS
1483 	unsigned int irq_events;
1484 	unsigned long hardirq_enable_ip;
1485 	unsigned long hardirq_disable_ip;
1486 	unsigned int hardirq_enable_event;
1487 	unsigned int hardirq_disable_event;
1488 	int hardirqs_enabled;
1489 	int hardirq_context;
1490 	unsigned long softirq_disable_ip;
1491 	unsigned long softirq_enable_ip;
1492 	unsigned int softirq_disable_event;
1493 	unsigned int softirq_enable_event;
1494 	int softirqs_enabled;
1495 	int softirq_context;
1496 #endif
1497 #ifdef CONFIG_LOCKDEP
1498 # define MAX_LOCK_DEPTH 48UL
1499 	u64 curr_chain_key;
1500 	int lockdep_depth;
1501 	unsigned int lockdep_recursion;
1502 	struct held_lock held_locks[MAX_LOCK_DEPTH];
1503 	gfp_t lockdep_reclaim_gfp;
1504 #endif
1505 
1506 /* journalling filesystem info */
1507 	void *journal_info;
1508 
1509 /* stacked block device info */
1510 	struct bio_list *bio_list;
1511 
1512 #ifdef CONFIG_BLOCK
1513 /* stack plugging */
1514 	struct blk_plug *plug;
1515 #endif
1516 
1517 /* VM state */
1518 	struct reclaim_state *reclaim_state;
1519 
1520 	struct backing_dev_info *backing_dev_info;
1521 
1522 	struct io_context *io_context;
1523 
1524 	unsigned long ptrace_message;
1525 	siginfo_t *last_siginfo; /* For ptrace use.  */
1526 	struct task_io_accounting ioac;
1527 #if defined(CONFIG_TASK_XACCT)
1528 	u64 acct_rss_mem1;	/* accumulated rss usage */
1529 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
1530 	cputime_t acct_timexpd;	/* stime + utime since last update */
1531 #endif
1532 #ifdef CONFIG_CPUSETS
1533 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1534 	seqcount_t mems_allowed_seq;	/* Sequence no to catch updates */
1535 	int cpuset_mem_spread_rotor;
1536 	int cpuset_slab_spread_rotor;
1537 #endif
1538 #ifdef CONFIG_CGROUPS
1539 	/* Control Group info protected by css_set_lock */
1540 	struct css_set __rcu *cgroups;
1541 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1542 	struct list_head cg_list;
1543 #endif
1544 #ifdef CONFIG_FUTEX
1545 	struct robust_list_head __user *robust_list;
1546 #ifdef CONFIG_COMPAT
1547 	struct compat_robust_list_head __user *compat_robust_list;
1548 #endif
1549 	struct list_head pi_state_list;
1550 	struct futex_pi_state *pi_state_cache;
1551 #endif
1552 #ifdef CONFIG_PERF_EVENTS
1553 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1554 	struct mutex perf_event_mutex;
1555 	struct list_head perf_event_list;
1556 #endif
1557 #ifdef CONFIG_NUMA
1558 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1559 	short il_next;
1560 	short pref_node_fork;
1561 #endif
1562 	struct rcu_head rcu;
1563 
1564 	/*
1565 	 * cache last used pipe for splice
1566 	 */
1567 	struct pipe_inode_info *splice_pipe;
1568 #ifdef	CONFIG_TASK_DELAY_ACCT
1569 	struct task_delay_info *delays;
1570 #endif
1571 #ifdef CONFIG_FAULT_INJECTION
1572 	int make_it_fail;
1573 #endif
1574 	/*
1575 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1576 	 * balance_dirty_pages() for some dirty throttling pause
1577 	 */
1578 	int nr_dirtied;
1579 	int nr_dirtied_pause;
1580 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
1581 
1582 #ifdef CONFIG_LATENCYTOP
1583 	int latency_record_count;
1584 	struct latency_record latency_record[LT_SAVECOUNT];
1585 #endif
1586 	/*
1587 	 * time slack values; these are used to round up poll() and
1588 	 * select() etc timeout values. These are in nanoseconds.
1589 	 */
1590 	unsigned long timer_slack_ns;
1591 	unsigned long default_timer_slack_ns;
1592 
1593 	struct list_head	*scm_work_list;
1594 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1595 	/* Index of current stored address in ret_stack */
1596 	int curr_ret_stack;
1597 	/* Stack of return addresses for return function tracing */
1598 	struct ftrace_ret_stack	*ret_stack;
1599 	/* time stamp for last schedule */
1600 	unsigned long long ftrace_timestamp;
1601 	/*
1602 	 * Number of functions that haven't been traced
1603 	 * because of depth overrun.
1604 	 */
1605 	atomic_t trace_overrun;
1606 	/* Pause for the tracing */
1607 	atomic_t tracing_graph_pause;
1608 #endif
1609 #ifdef CONFIG_TRACING
1610 	/* state flags for use by tracers */
1611 	unsigned long trace;
1612 	/* bitmask and counter of trace recursion */
1613 	unsigned long trace_recursion;
1614 #endif /* CONFIG_TRACING */
1615 #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
1616 	struct memcg_batch_info {
1617 		int do_batch;	/* incremented when batch uncharge started */
1618 		struct mem_cgroup *memcg; /* target memcg of uncharge */
1619 		unsigned long nr_pages;	/* uncharged usage */
1620 		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
1621 	} memcg_batch;
1622 #endif
1623 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1624 	atomic_t ptrace_bp_refcnt;
1625 #endif
1626 };
1627 
1628 /* Future-safe accessor for struct task_struct's cpus_allowed. */
1629 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1630 
1631 /*
1632  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
1633  * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
1634  * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
1635  * values are inverted: lower p->prio value means higher priority.
1636  *
1637  * The MAX_USER_RT_PRIO value allows the actual maximum
1638  * RT priority to be separate from the value exported to
1639  * user-space.  This allows kernel threads to set their
1640  * priority to a value higher than any user task. Note:
1641  * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
1642  */
1643 
1644 #define MAX_USER_RT_PRIO	100
1645 #define MAX_RT_PRIO		MAX_USER_RT_PRIO
1646 
1647 #define MAX_PRIO		(MAX_RT_PRIO + 40)
1648 #define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
1649 
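/*
 * Illustrative numbers under the defaults above (an editorial sketch,
 * not part of the original header): an RT task with sched_priority 50
 * ends up with p->prio = MAX_RT_PRIO - 1 - 50 = 49, while a
 * SCHED_NORMAL task at nice 0 sits at DEFAULT_PRIO = 120 and the nice
 * range -20..19 maps onto 100..139.  Lower p->prio always means
 * higher priority.
 */
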
1650 static inline int rt_prio(int prio)
1651 {
1652 	if (unlikely(prio < MAX_RT_PRIO))
1653 		return 1;
1654 	return 0;
1655 }
1656 
1657 static inline int rt_task(struct task_struct *p)
1658 {
1659 	return rt_prio(p->prio);
1660 }
1661 
1662 static inline struct pid *task_pid(struct task_struct *task)
1663 {
1664 	return task->pids[PIDTYPE_PID].pid;
1665 }
1666 
1667 static inline struct pid *task_tgid(struct task_struct *task)
1668 {
1669 	return task->group_leader->pids[PIDTYPE_PID].pid;
1670 }
1671 
1672 /*
1673  * Without tasklist or rcu lock it is not safe to dereference
1674  * the result of task_pgrp/task_session even if task == current;
1675  * we can race with another thread doing sys_setsid/sys_setpgid.
1676  */
1677 static inline struct pid *task_pgrp(struct task_struct *task)
1678 {
1679 	return task->group_leader->pids[PIDTYPE_PGID].pid;
1680 }
1681 
1682 static inline struct pid *task_session(struct task_struct *task)
1683 {
1684 	return task->group_leader->pids[PIDTYPE_SID].pid;
1685 }
1686 
1687 struct pid_namespace;
1688 
1689 /*
1690  * the helpers to get the task's different pids as they are seen
1691  * from various namespaces
1692  *
1693  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1694  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1695  *                     current.
1696  * task_xid_nr_ns()  : id seen from the ns specified;
1697  *
1698  * set_task_vxid()   : assigns a virtual id to a task;
1699  *
1700  * see also pid_nr() etc in include/linux/pid.h
1701  */
1702 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1703 			struct pid_namespace *ns);
1704 
1705 static inline pid_t task_pid_nr(struct task_struct *tsk)
1706 {
1707 	return tsk->pid;
1708 }
1709 
1710 static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1711 					struct pid_namespace *ns)
1712 {
1713 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1714 }
1715 
1716 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1717 {
1718 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1719 }
1720 
1721 
1722 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1723 {
1724 	return tsk->tgid;
1725 }
1726 
1727 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1728 
1729 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1730 {
1731 	return pid_vnr(task_tgid(tsk));
1732 }
1733 
1734 
1735 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1736 					struct pid_namespace *ns)
1737 {
1738 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1739 }
1740 
1741 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1742 {
1743 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1744 }
1745 
1746 
1747 static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1748 					struct pid_namespace *ns)
1749 {
1750 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1751 }
1752 
1753 static inline pid_t task_session_vnr(struct task_struct *tsk)
1754 {
1755 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1756 }
1757 
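/*
 * Usage sketch (illustrative only; "tsk" is assumed to be a valid,
 * properly referenced task_struct): task_pid_nr() reports the id seen
 * from the init namespace, task_pid_vnr() the id seen from current's
 * pid namespace.
 *
 *	pid_t gpid = task_pid_nr(tsk);
 *	pid_t vpid = task_pid_vnr(tsk);
 *	pr_debug("task %d (virtual %d)\n", gpid, vpid);
 */
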
1758 /* obsolete, do not use */
1759 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1760 {
1761 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1762 }
1763 
1764 /**
1765  * pid_alive - check that a task structure is not stale
1766  * @p: Task structure to be checked.
1767  *
1768  * Test if a process is not yet dead (at most zombie state)
1769  * If pid_alive fails, then pointers within the task structure
1770  * can be stale and must not be dereferenced.
1771  */
1772 static inline int pid_alive(struct task_struct *p)
1773 {
1774 	return p->pids[PIDTYPE_PID].pid != NULL;
1775 }
1776 
1777 /**
1778  * is_global_init - check if a task structure is init
1779  * @tsk: Task structure to be checked.
1780  *
1781  * Check if a task structure is the first user space task the kernel created.
1782  */
1783 static inline int is_global_init(struct task_struct *tsk)
1784 {
1785 	return tsk->pid == 1;
1786 }
1787 
1788 /*
1789  * is_container_init:
1790  * check whether the task is init in its own pid namespace.
1791  */
1792 extern int is_container_init(struct task_struct *tsk);
1793 
1794 extern struct pid *cad_pid;
1795 
1796 extern void free_task(struct task_struct *tsk);
1797 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1798 
1799 extern void __put_task_struct(struct task_struct *t);
1800 
1801 static inline void put_task_struct(struct task_struct *t)
1802 {
1803 	if (atomic_dec_and_test(&t->usage))
1804 		__put_task_struct(t);
1805 }
1806 
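/*
 * Reference counting sketch (illustrative, not part of the original
 * header): a caller that wants to hold on to a task_struct across a
 * sleep takes a reference first and drops it when done.  "p" is
 * assumed to have been found under rcu_read_lock() or tasklist_lock.
 *
 *	get_task_struct(p);
 *	...
 *	put_task_struct(p);
 */
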
1807 extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1808 extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1809 
1810 extern int task_free_register(struct notifier_block *n);
1811 extern int task_free_unregister(struct notifier_block *n);
1812 
1813 /*
1814  * Per process flags
1815  */
1816 #define PF_EXITING	0x00000004	/* getting shut down */
1817 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
1818 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
1819 #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
1820 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
1821 #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
1822 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
1823 #define PF_DUMPCORE	0x00000200	/* dumped core */
1824 #define PF_SIGNALED	0x00000400	/* killed by a signal */
1825 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
1826 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
1827 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
1828 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
1829 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
1830 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
1831 #define PF_KSWAPD	0x00040000	/* I am kswapd */
1832 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1833 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1834 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1835 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1836 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
1837 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
1838 #define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
1839 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1840 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
1841 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
1842 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
1843 
1844 /*
1845  * Only the _current_ task can read/write to tsk->flags, but other
1846  * tasks can access tsk->flags in readonly mode for example
1847  * with tsk_used_math (like during threaded core dumping).
1848  * There is however an exception to this rule during ptrace
1849  * or during fork: the ptracer task is allowed to write to the
1850  * child->flags of its traced child (same goes for fork, the parent
1851  * can write to the child->flags), because we're guaranteed the
1852  * child is not running and in turn not changing child->flags
1853  * at the same time the parent does it.
1854  */
1855 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1856 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1857 #define clear_used_math() clear_stopped_child_used_math(current)
1858 #define set_used_math() set_stopped_child_used_math(current)
1859 #define conditional_stopped_child_used_math(condition, child) \
1860 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1861 #define conditional_used_math(condition) \
1862 	conditional_stopped_child_used_math(condition, current)
1863 #define copy_to_stopped_child_used_math(child) \
1864 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1865 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1866 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1867 #define used_math() tsk_used_math(current)
1868 
1869 /* Per-process atomic flags. */
1870 #define PFA_NO_NEW_PRIVS 0x00000001	/* May not gain new privileges. */
1871 
1872 static inline bool task_no_new_privs(struct task_struct *p)
1873 {
1874 	return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
1875 }
1876 
1877 static inline void task_set_no_new_privs(struct task_struct *p)
1878 {
1879 	set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
1880 }
1881 
1882 /*
1883  * task->jobctl flags
1884  */
1885 #define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
1886 
1887 #define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
1888 #define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
1889 #define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
1890 #define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
1891 #define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
1892 #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
1893 #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
1894 
1895 #define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
1896 #define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
1897 #define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
1898 #define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
1899 #define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
1900 #define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
1901 #define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)
1902 
1903 #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
1904 #define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
1905 
1906 extern bool task_set_jobctl_pending(struct task_struct *task,
1907 				    unsigned int mask);
1908 extern void task_clear_jobctl_trapping(struct task_struct *task);
1909 extern void task_clear_jobctl_pending(struct task_struct *task,
1910 				      unsigned int mask);
1911 
1912 #ifdef CONFIG_PREEMPT_RCU
1913 
1914 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1915 #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1916 
1917 static inline void rcu_copy_process(struct task_struct *p)
1918 {
1919 	p->rcu_read_lock_nesting = 0;
1920 	p->rcu_read_unlock_special = 0;
1921 #ifdef CONFIG_TREE_PREEMPT_RCU
1922 	p->rcu_blocked_node = NULL;
1923 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1924 #ifdef CONFIG_RCU_BOOST
1925 	p->rcu_boost_mutex = NULL;
1926 #endif /* #ifdef CONFIG_RCU_BOOST */
1927 	INIT_LIST_HEAD(&p->rcu_node_entry);
1928 }
1929 
1930 #else
1931 
1932 static inline void rcu_copy_process(struct task_struct *p)
1933 {
1934 }
1935 
1936 #endif
1937 
1938 #ifdef CONFIG_SMP
1939 extern void do_set_cpus_allowed(struct task_struct *p,
1940 			       const struct cpumask *new_mask);
1941 
1942 extern int set_cpus_allowed_ptr(struct task_struct *p,
1943 				const struct cpumask *new_mask);
1944 #else
1945 static inline void do_set_cpus_allowed(struct task_struct *p,
1946 				      const struct cpumask *new_mask)
1947 {
1948 }
1949 static inline int set_cpus_allowed_ptr(struct task_struct *p,
1950 				       const struct cpumask *new_mask)
1951 {
1952 	if (!cpumask_test_cpu(0, new_mask))
1953 		return -EINVAL;
1954 	return 0;
1955 }
1956 #endif
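
/*
 * Affinity sketch (illustrative): pin a kernel thread to a single CPU.
 * "ktask" and "cpu" are assumed to come from the caller; cpumask_of()
 * is provided by <linux/cpumask.h>.
 *
 *	int err = set_cpus_allowed_ptr(ktask, cpumask_of(cpu));
 *	if (err)
 *		pr_warn("could not pin thread: %d\n", err);
 */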
1957 
1958 #ifdef CONFIG_NO_HZ
1959 void calc_load_enter_idle(void);
1960 void calc_load_exit_idle(void);
1961 #else
1962 static inline void calc_load_enter_idle(void) { }
1963 static inline void calc_load_exit_idle(void) { }
1964 #endif /* CONFIG_NO_HZ */
1965 
1966 #ifndef CONFIG_CPUMASK_OFFSTACK
1967 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1968 {
1969 	return set_cpus_allowed_ptr(p, &new_mask);
1970 }
1971 #endif
1972 
1973 /*
1974  * Do not use outside of architecture code which knows its limitations.
1975  *
1976  * sched_clock() makes no promise of monotonicity or bounded drift between
1977  * CPUs; using it (which you should not) requires disabling IRQs.
1978  *
1979  * Please use one of the three interfaces below.
1980  */
1981 extern unsigned long long notrace sched_clock(void);
1982 /*
1983  * See the comment in kernel/sched_clock.c
1984  */
1985 extern u64 cpu_clock(int cpu);
1986 extern u64 local_clock(void);
1987 extern u64 sched_clock_cpu(int cpu);
1988 
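/*
 * Timing sketch (illustrative): measure a short section with
 * local_clock(), which returns nanoseconds on the local CPU and is
 * safe to call from most contexts.  do_something() stands in for the
 * code being timed.
 *
 *	u64 t0 = local_clock();
 *	do_something();
 *	pr_debug("took %llu ns\n",
 *		 (unsigned long long)(local_clock() - t0));
 */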
1989 
1990 extern void sched_clock_init(void);
1991 
1992 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1993 static inline void sched_clock_tick(void)
1994 {
1995 }
1996 
1997 static inline void sched_clock_idle_sleep_event(void)
1998 {
1999 }
2000 
2001 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2002 {
2003 }
2004 #else
2005 /*
2006  * Architectures can set this to 1 if they have specified
2007  * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
2008  * but then during bootup it turns out that sched_clock()
2009  * is reliable after all:
2010  */
2011 extern int sched_clock_stable;
2012 
2013 extern void sched_clock_tick(void);
2014 extern void sched_clock_idle_sleep_event(void);
2015 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2016 #endif
2017 
2018 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2019 /*
2020  * An interface for runtime opt-in to irq time accounting based on sched_clock.
2021  * The explicit opt-in avoids a performance penalty on systems with
2022  * slow sched_clocks.
2023  */
2024 extern void enable_sched_clock_irqtime(void);
2025 extern void disable_sched_clock_irqtime(void);
2026 #else
2027 static inline void enable_sched_clock_irqtime(void) {}
2028 static inline void disable_sched_clock_irqtime(void) {}
2029 #endif
2030 
2031 extern unsigned long long
2032 task_sched_runtime(struct task_struct *task);
2033 
2034 /* sched_exec is called by processes performing an exec */
2035 #ifdef CONFIG_SMP
2036 extern void sched_exec(void);
2037 #else
2038 #define sched_exec()   {}
2039 #endif
2040 
2041 extern void sched_clock_idle_sleep_event(void);
2042 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2043 
2044 #ifdef CONFIG_HOTPLUG_CPU
2045 extern void idle_task_exit(void);
2046 #else
2047 static inline void idle_task_exit(void) {}
2048 #endif
2049 
2050 #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
2051 extern void wake_up_idle_cpu(int cpu);
2052 #else
2053 static inline void wake_up_idle_cpu(int cpu) { }
2054 #endif
2055 
2056 extern unsigned int sysctl_sched_latency;
2057 extern unsigned int sysctl_sched_min_granularity;
2058 extern unsigned int sysctl_sched_wakeup_granularity;
2059 extern unsigned int sysctl_sched_child_runs_first;
2060 
2061 enum sched_tunable_scaling {
2062 	SCHED_TUNABLESCALING_NONE,
2063 	SCHED_TUNABLESCALING_LOG,
2064 	SCHED_TUNABLESCALING_LINEAR,
2065 	SCHED_TUNABLESCALING_END,
2066 };
2067 extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
2068 
2069 #ifdef CONFIG_SCHED_DEBUG
2070 extern unsigned int sysctl_sched_migration_cost;
2071 extern unsigned int sysctl_sched_nr_migrate;
2072 extern unsigned int sysctl_sched_time_avg;
2073 extern unsigned int sysctl_timer_migration;
2074 extern unsigned int sysctl_sched_shares_window;
2075 
2076 int sched_proc_update_handler(struct ctl_table *table, int write,
2077 		void __user *buffer, size_t *length,
2078 		loff_t *ppos);
2079 #endif
2080 #ifdef CONFIG_SCHED_DEBUG
2081 static inline unsigned int get_sysctl_timer_migration(void)
2082 {
2083 	return sysctl_timer_migration;
2084 }
2085 #else
2086 static inline unsigned int get_sysctl_timer_migration(void)
2087 {
2088 	return 1;
2089 }
2090 #endif
2091 extern unsigned int sysctl_sched_rt_period;
2092 extern int sysctl_sched_rt_runtime;
2093 
2094 int sched_rt_handler(struct ctl_table *table, int write,
2095 		void __user *buffer, size_t *lenp,
2096 		loff_t *ppos);
2097 
2098 #ifdef CONFIG_SCHED_AUTOGROUP
2099 extern unsigned int sysctl_sched_autogroup_enabled;
2100 
2101 extern void sched_autogroup_create_attach(struct task_struct *p);
2102 extern void sched_autogroup_detach(struct task_struct *p);
2103 extern void sched_autogroup_fork(struct signal_struct *sig);
2104 extern void sched_autogroup_exit(struct signal_struct *sig);
2105 #ifdef CONFIG_PROC_FS
2106 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2107 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2108 #endif
2109 #else
2110 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2111 static inline void sched_autogroup_detach(struct task_struct *p) { }
2112 static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2113 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2114 #endif
2115 
2116 #ifdef CONFIG_CFS_BANDWIDTH
2117 extern unsigned int sysctl_sched_cfs_bandwidth_slice;
2118 #endif
2119 
2120 #ifdef CONFIG_RT_MUTEXES
2121 extern int rt_mutex_getprio(struct task_struct *p);
2122 extern void rt_mutex_setprio(struct task_struct *p, int prio);
2123 extern void rt_mutex_adjust_pi(struct task_struct *p);
2124 static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
2125 {
2126 	return tsk->pi_blocked_on != NULL;
2127 }
2128 #else
2129 static inline int rt_mutex_getprio(struct task_struct *p)
2130 {
2131 	return p->normal_prio;
2132 }
2133 # define rt_mutex_adjust_pi(p)		do { } while (0)
2134 static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
2135 {
2136 	return false;
2137 }
2138 #endif
2139 
2140 extern bool yield_to(struct task_struct *p, bool preempt);
2141 extern void set_user_nice(struct task_struct *p, long nice);
2142 extern int task_prio(const struct task_struct *p);
2143 extern int task_nice(const struct task_struct *p);
2144 extern int can_nice(const struct task_struct *p, const int nice);
2145 extern int task_curr(const struct task_struct *p);
2146 extern int idle_cpu(int cpu);
2147 extern int sched_setscheduler(struct task_struct *, int,
2148 			      const struct sched_param *);
2149 extern int sched_setscheduler_nocheck(struct task_struct *, int,
2150 				      const struct sched_param *);
2151 extern struct task_struct *idle_task(int cpu);
2152 /**
2153  * is_idle_task - is the specified task an idle task?
2154  * @p: the task in question.
2155  */
2156 static inline bool is_idle_task(const struct task_struct *p)
2157 {
2158 	return p->pid == 0;
2159 }
2160 extern struct task_struct *curr_task(int cpu);
2161 extern void set_curr_task(int cpu, struct task_struct *p);
2162 
2163 void yield(void);
2164 
2165 /*
2166  * The default (Linux) execution domain.
2167  */
2168 extern struct exec_domain	default_exec_domain;
2169 
2170 union thread_union {
2171 	struct thread_info thread_info;
2172 	unsigned long stack[THREAD_SIZE/sizeof(long)];
2173 };
2174 
2175 #ifndef __HAVE_ARCH_KSTACK_END
2176 static inline int kstack_end(void *addr)
2177 {
2178 	/* Reliable end of stack detection:
2179 	 * Some APM bios versions misalign the stack
2180 	 */
2181 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2182 }
2183 #endif
2184 
2185 extern union thread_union init_thread_union;
2186 extern struct task_struct init_task;
2187 
2188 extern struct   mm_struct init_mm;
2189 
2190 extern struct pid_namespace init_pid_ns;
2191 
2192 /*
2193  * find a task by one of its numerical ids
2194  *
2195  * find_task_by_pid_ns():
2196  *      finds a task by its pid in the specified namespace
2197  * find_task_by_vpid():
2198  *      finds a task by its virtual pid
2199  *
2200  * see also find_vpid() etc in include/linux/pid.h
2201  */
2202 
2203 extern struct task_struct *find_task_by_vpid(pid_t nr);
2204 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2205 		struct pid_namespace *ns);
2206 
2207 extern void __set_special_pids(struct pid *pid);
2208 
2209 /* per-UID process charging. */
2210 extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
2211 static inline struct user_struct *get_uid(struct user_struct *u)
2212 {
2213 	atomic_inc(&u->__count);
2214 	return u;
2215 }
2216 extern void free_uid(struct user_struct *);
2217 extern void release_uids(struct user_namespace *ns);
2218 
2219 #include <asm/current.h>
2220 
2221 extern void xtime_update(unsigned long ticks);
2222 
2223 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2224 extern int wake_up_process(struct task_struct *tsk);
2225 extern void wake_up_new_task(struct task_struct *tsk);
2226 #ifdef CONFIG_SMP
2227  extern void kick_process(struct task_struct *tsk);
2228 #else
2229  static inline void kick_process(struct task_struct *tsk) { }
2230 #endif
2231 extern void sched_fork(struct task_struct *p);
2232 extern void sched_dead(struct task_struct *p);
2233 
2234 extern void proc_caches_init(void);
2235 extern void flush_signals(struct task_struct *);
2236 extern void __flush_signals(struct task_struct *);
2237 extern void ignore_signals(struct task_struct *);
2238 extern void flush_signal_handlers(struct task_struct *, int force_default);
2239 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2240 
2241 static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2242 {
2243 	unsigned long flags;
2244 	int ret;
2245 
2246 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
2247 	ret = dequeue_signal(tsk, mask, info);
2248 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2249 
2250 	return ret;
2251 }
2252 
2253 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2254 			      sigset_t *mask);
2255 extern void unblock_all_signals(void);
2256 extern void release_task(struct task_struct * p);
2257 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2258 extern int force_sigsegv(int, struct task_struct *);
2259 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2260 extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2261 extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2262 extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2263 				const struct cred *, u32);
2264 extern int kill_pgrp(struct pid *pid, int sig, int priv);
2265 extern int kill_pid(struct pid *pid, int sig, int priv);
2266 extern int kill_proc_info(int, struct siginfo *, pid_t);
2267 extern __must_check bool do_notify_parent(struct task_struct *, int);
2268 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2269 extern void force_sig(int, struct task_struct *);
2270 extern int send_sig(int, struct task_struct *, int);
2271 extern int zap_other_threads(struct task_struct *p);
2272 extern struct sigqueue *sigqueue_alloc(void);
2273 extern void sigqueue_free(struct sigqueue *);
2274 extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
2275 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2276 extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
2277 
2278 static inline int kill_cad_pid(int sig, int priv)
2279 {
2280 	return kill_pid(cad_pid, sig, priv);
2281 }
2282 
2283 /* These can be the second arg to send_sig_info/send_group_sig_info.  */
2284 #define SEND_SIG_NOINFO ((struct siginfo *) 0)
2285 #define SEND_SIG_PRIV	((struct siginfo *) 1)
2286 #define SEND_SIG_FORCED	((struct siginfo *) 2)
2287 
2288 /*
2289  * True if we are on the alternate signal stack.
2290  */
2291 static inline int on_sig_stack(unsigned long sp)
2292 {
2293 #ifdef CONFIG_STACK_GROWSUP
2294 	return sp >= current->sas_ss_sp &&
2295 		sp - current->sas_ss_sp < current->sas_ss_size;
2296 #else
2297 	return sp > current->sas_ss_sp &&
2298 		sp - current->sas_ss_sp <= current->sas_ss_size;
2299 #endif
2300 }
2301 
2302 static inline int sas_ss_flags(unsigned long sp)
2303 {
2304 	return (current->sas_ss_size == 0 ? SS_DISABLE
2305 		: on_sig_stack(sp) ? SS_ONSTACK : 0);
2306 }
2307 
2308 /*
2309  * Routines for handling mm_structs
2310  */
2311 extern struct mm_struct * mm_alloc(void);
2312 
2313 /* mmdrop drops the mm and the page tables */
2314 extern void __mmdrop(struct mm_struct *);
2315 static inline void mmdrop(struct mm_struct * mm)
2316 {
2317 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2318 		__mmdrop(mm);
2319 }
2320 
2321 /* mmput gets rid of the mappings and all user-space */
2322 extern void mmput(struct mm_struct *);
2323 /* Grab a reference to a task's mm, if it is not already going away */
2324 extern struct mm_struct *get_task_mm(struct task_struct *task);
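/*
 * Usage sketch (illustrative): look at another task's mm safely.
 * get_task_mm() returns NULL if the mm is already going away, and the
 * reference it takes must be dropped with mmput().
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		...
 *		mmput(mm);
 *	}
 */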
2325 /*
2326  * Grab a reference to a task's mm, if it is not already going away
2327  * and ptrace_may_access with the mode parameter passed to it
2328  * succeeds.
2329  */
2330 extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2331 /* Remove the current task's stale references to the old mm_struct */
2332 extern void mm_release(struct task_struct *, struct mm_struct *);
2333 /* Allocate a new mm structure and copy contents from tsk->mm */
2334 extern struct mm_struct *dup_mm(struct task_struct *tsk);
2335 
2336 extern int copy_thread(unsigned long, unsigned long, unsigned long,
2337 			struct task_struct *, struct pt_regs *);
2338 extern void flush_thread(void);
2339 extern void exit_thread(void);
2340 
2341 extern void exit_files(struct task_struct *);
2342 extern void __cleanup_sighand(struct sighand_struct *);
2343 
2344 extern void exit_itimers(struct signal_struct *);
2345 extern void flush_itimer_signals(void);
2346 
2347 extern void do_group_exit(int);
2348 
2349 extern void daemonize(const char *, ...);
2350 extern int allow_signal(int);
2351 extern int disallow_signal(int);
2352 
2353 extern int do_execve(const char *,
2354 		     const char __user * const __user *,
2355 		     const char __user * const __user *, struct pt_regs *);
2356 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
2357 struct task_struct *fork_idle(int);
2358 
2359 extern void set_task_comm(struct task_struct *tsk, char *from);
2360 extern char *get_task_comm(char *to, struct task_struct *tsk);
2361 
2362 #ifdef CONFIG_SMP
2363 void scheduler_ipi(void);
2364 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2365 #else
2366 static inline void scheduler_ipi(void) { }
2367 static inline unsigned long wait_task_inactive(struct task_struct *p,
2368 					       long match_state)
2369 {
2370 	return 1;
2371 }
2372 #endif
2373 
2374 #define next_task(p) \
2375 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2376 
2377 #define for_each_process(p) \
2378 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2379 
2380 extern bool current_is_single_threaded(void);
2381 
2382 /*
2383  * Careful: do_each_thread/while_each_thread is a double loop so
2384  *          'break' will not work as expected - use goto instead.
2385  */
2386 #define do_each_thread(g, t) \
2387 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2388 
2389 #define while_each_thread(g, t) \
2390 	while ((t = next_thread(t)) != g)
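
/*
 * Iteration sketch (illustrative): walk every thread in the system
 * while holding tasklist_lock (or rcu_read_lock()).  Because this is
 * a double loop, leave it with goto rather than break; some_condition()
 * is a stand-in for the caller's test.
 *
 *	struct task_struct *g, *t;
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		if (some_condition(t))
 *			goto out;
 *	} while_each_thread(g, t);
 * out:
 *	read_unlock(&tasklist_lock);
 */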
2391 
2392 #define __for_each_thread(signal, t)	\
2393 	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
2394 
2395 #define for_each_thread(p, t)		\
2396 	__for_each_thread((p)->signal, t)
2397 
2398 /* Careful: this is a double loop, 'break' won't work as expected. */
2399 #define for_each_process_thread(p, t)	\
2400 	for_each_process(p) for_each_thread(p, t)
2401 
2402 static inline int get_nr_threads(struct task_struct *tsk)
2403 {
2404 	return tsk->signal->nr_threads;
2405 }
2406 
2407 static inline bool thread_group_leader(struct task_struct *p)
2408 {
2409 	return p->exit_signal >= 0;
2410 }
2411 
2412 /* Due to the insanities of de_thread it is possible for a process
2413  * to have the pid of the thread group leader without actually being
2414  * the thread group leader.  For iteration through the pids in proc
2415  * all we care about is that we have a task with the appropriate
2416  * pid; we don't actually care if we have the right task.
2417  */
2418 static inline int has_group_leader_pid(struct task_struct *p)
2419 {
2420 	return p->pid == p->tgid;
2421 }
2422 
2423 static inline
2424 int same_thread_group(struct task_struct *p1, struct task_struct *p2)
2425 {
2426 	return p1->tgid == p2->tgid;
2427 }
2428 
2429 static inline struct task_struct *next_thread(const struct task_struct *p)
2430 {
2431 	return list_entry_rcu(p->thread_group.next,
2432 			      struct task_struct, thread_group);
2433 }
2434 
2435 static inline int thread_group_empty(struct task_struct *p)
2436 {
2437 	return list_empty(&p->thread_group);
2438 }
2439 
2440 #define delay_group_leader(p) \
2441 		(thread_group_leader(p) && !thread_group_empty(p))
2442 
2443 /*
2444  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2445  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2446  * pins the final release of task.io_context.  Also protects ->cpuset and
2447  * ->cgroup.subsys[]. And ->vfork_done.
2448  *
2449  * Nests both inside and outside of read_lock(&tasklist_lock).
2450  * It must not be nested with write_lock_irq(&tasklist_lock),
2451  * neither inside nor outside.
2452  */
2453 static inline void task_lock(struct task_struct *p)
2454 {
2455 	spin_lock(&p->alloc_lock);
2456 }
2457 
2458 static inline void task_unlock(struct task_struct *p)
2459 {
2460 	spin_unlock(&p->alloc_lock);
2461 }
2462 
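/*
 * Locking sketch (illustrative): alloc_lock protects ->comm among the
 * fields listed above, so a stable copy of another task's name can be
 * taken like this ("buf" is assumed to be at least TASK_COMM_LEN
 * bytes).
 *
 *	task_lock(p);
 *	strncpy(buf, p->comm, TASK_COMM_LEN);
 *	task_unlock(p);
 */
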
2463 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2464 							unsigned long *flags);
2465 
2466 static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2467 						       unsigned long *flags)
2468 {
2469 	struct sighand_struct *ret;
2470 
2471 	ret = __lock_task_sighand(tsk, flags);
2472 	(void)__cond_lock(&tsk->sighand->siglock, ret);
2473 	return ret;
2474 }
2475 
2476 static inline void unlock_task_sighand(struct task_struct *tsk,
2477 						unsigned long *flags)
2478 {
2479 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2480 }
2481 
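/*
 * Sighand locking sketch (illustrative): lock_task_sighand() fails if
 * the task is already exiting, so its return value must be checked.
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		...
 *		unlock_task_sighand(p, &flags);
 *	}
 */
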
2482 #ifdef CONFIG_CGROUPS
2483 static inline void threadgroup_change_begin(struct task_struct *tsk)
2484 {
2485 	down_read(&tsk->signal->group_rwsem);
2486 }
2487 static inline void threadgroup_change_end(struct task_struct *tsk)
2488 {
2489 	up_read(&tsk->signal->group_rwsem);
2490 }
2491 
2492 /**
2493  * threadgroup_lock - lock threadgroup
2494  * @tsk: member task of the threadgroup to lock
2495  *
2496  * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
2497  * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2498  * perform exec.  This is useful for cases where the threadgroup needs to
2499  * stay stable across blockable operations.
2500  *
2501  * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
2502  * synchronization.  While held, no new task will be added to threadgroup
2503  * and no existing live task will have its PF_EXITING set.
2504  *
2505  * During exec, a task goes and puts its thread group through unusual
2506  * changes.  After de-threading, exclusive access is assumed to resources
2507  * which are usually shared by tasks in the same group - e.g. sighand may
2508  * be replaced with a new one.  Also, the exec'ing task takes over group
2509  * leader role including its pid.  Exclude these changes while locked by
2510  * grabbing cred_guard_mutex which is used to synchronize exec path.
2511  */
2512 static inline void threadgroup_lock(struct task_struct *tsk)
2513 {
2514 	/*
2515 	 * exec uses exit for de-threading nesting group_rwsem inside
2516 	 * cred_guard_mutex. Grab cred_guard_mutex first.
2517 	 */
2518 	mutex_lock(&tsk->signal->cred_guard_mutex);
2519 	down_write(&tsk->signal->group_rwsem);
2520 }
2521 
2522 /**
2523  * threadgroup_unlock - unlock threadgroup
2524  * @tsk: member task of the threadgroup to unlock
2525  *
2526  * Reverse threadgroup_lock().
2527  */
2528 static inline void threadgroup_unlock(struct task_struct *tsk)
2529 {
2530 	up_write(&tsk->signal->group_rwsem);
2531 	mutex_unlock(&tsk->signal->cred_guard_mutex);
2532 }
2533 #else
2534 static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2535 static inline void threadgroup_change_end(struct task_struct *tsk) {}
2536 static inline void threadgroup_lock(struct task_struct *tsk) {}
2537 static inline void threadgroup_unlock(struct task_struct *tsk) {}
2538 #endif
2539 
2540 #ifndef __HAVE_THREAD_FUNCTIONS
2541 
2542 #define task_thread_info(task)	((struct thread_info *)(task)->stack)
2543 #define task_stack_page(task)	((task)->stack)
2544 
2545 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2546 {
2547 	*task_thread_info(p) = *task_thread_info(org);
2548 	task_thread_info(p)->task = p;
2549 }
2550 
2551 static inline unsigned long *end_of_stack(struct task_struct *p)
2552 {
2553 	return (unsigned long *)(task_thread_info(p) + 1);
2554 }
2555 
2556 #endif
2557 
2558 static inline int object_is_on_stack(void *obj)
2559 {
2560 	void *stack = task_stack_page(current);
2561 
2562 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2563 }
2564 
2565 extern void thread_info_cache_init(void);
2566 
2567 #ifdef CONFIG_DEBUG_STACK_USAGE
2568 static inline unsigned long stack_not_used(struct task_struct *p)
2569 {
2570 	unsigned long *n = end_of_stack(p);
2571 
2572 	do { 	/* Skip over canary */
2573 		n++;
2574 	} while (!*n);
2575 
2576 	return (unsigned long)n - (unsigned long)end_of_stack(p);
2577 }
2578 #endif
2579 
2580 /* set thread flags in other task's structures
2581  * - see asm/thread_info.h for TIF_xxxx flags available
2582  */
2583 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2584 {
2585 	set_ti_thread_flag(task_thread_info(tsk), flag);
2586 }
2587 
2588 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2589 {
2590 	clear_ti_thread_flag(task_thread_info(tsk), flag);
2591 }
2592 
2593 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2594 {
2595 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2596 }
2597 
2598 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2599 {
2600 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2601 }
2602 
2603 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2604 {
2605 	return test_ti_thread_flag(task_thread_info(tsk), flag);
2606 }
2607 
2608 static inline void set_tsk_need_resched(struct task_struct *tsk)
2609 {
2610 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2611 }
2612 
2613 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2614 {
2615 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2616 }
2617 
2618 static inline int test_tsk_need_resched(struct task_struct *tsk)
2619 {
2620 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2621 }
2622 
2623 static inline int restart_syscall(void)
2624 {
2625 	set_tsk_thread_flag(current, TIF_SIGPENDING);
2626 	return -ERESTARTNOINTR;
2627 }
2628 
2629 static inline int signal_pending(struct task_struct *p)
2630 {
2631 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2632 }
2633 
2634 static inline int __fatal_signal_pending(struct task_struct *p)
2635 {
2636 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2637 }
2638 
2639 static inline int fatal_signal_pending(struct task_struct *p)
2640 {
2641 	return signal_pending(p) && __fatal_signal_pending(p);
2642 }
2643 
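/*
 * Wait-loop sketch (illustrative): an interruptible wait typically
 * bails out when a signal is pending so the syscall can be restarted
 * or aborted.  "condition" stands in for whatever the caller waits on.
 *
 *	while (!condition) {
 *		if (signal_pending(current))
 *			return -ERESTARTSYS;
 *		schedule_timeout_interruptible(HZ / 10);
 *	}
 */
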
2644 static inline int signal_pending_state(long state, struct task_struct *p)
2645 {
2646 	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2647 		return 0;
2648 	if (!signal_pending(p))
2649 		return 0;
2650 
2651 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2652 }
2653 
2654 static inline int need_resched(void)
2655 {
2656 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
2657 }
2658 
2659 /*
2660  * cond_resched() and cond_resched_lock(): latency reduction via
2661  * explicit rescheduling in places that are safe. The return
2662  * value indicates whether a reschedule was in fact done.
2663  * cond_resched_lock() will drop the spinlock before scheduling,
2664  * cond_resched_softirq() will enable bhs before scheduling.
2665  */
2666 extern int _cond_resched(void);
2667 
2668 #define cond_resched() ({			\
2669 	__might_sleep(__FILE__, __LINE__, 0);	\
2670 	_cond_resched();			\
2671 })
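
/*
 * Latency sketch (illustrative): a long kernel loop that holds no
 * spinlock can offer the CPU up periodically.  process_item() and
 * nr_items are stand-ins for the caller's work.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */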
2672 
2673 extern int __cond_resched_lock(spinlock_t *lock);
2674 
2675 #ifdef CONFIG_PREEMPT_COUNT
2676 #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
2677 #else
2678 #define PREEMPT_LOCK_OFFSET	0
2679 #endif
2680 
2681 #define cond_resched_lock(lock) ({				\
2682 	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
2683 	__cond_resched_lock(lock);				\
2684 })
2685 
2686 extern int __cond_resched_softirq(void);
2687 
2688 #define cond_resched_softirq() ({					\
2689 	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2690 	__cond_resched_softirq();					\
2691 })
2692 
2693 /*
2694  * Does a critical section need to be broken due to another
2695  * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
2696  * but it is a general need for low latency.)
2697  */
2698 static inline int spin_needbreak(spinlock_t *lock)
2699 {
2700 #ifdef CONFIG_PREEMPT
2701 	return spin_is_contended(lock);
2702 #else
2703 	return 0;
2704 #endif
2705 }
2706 
2707 /*
2708  * Thread group CPU time accounting.
2709  */
2710 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2711 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2712 
2713 static inline void thread_group_cputime_init(struct signal_struct *sig)
2714 {
2715 	raw_spin_lock_init(&sig->cputimer.lock);
2716 }
2717 
2718 /*
2719  * Reevaluate whether the task has signals pending delivery.
2720  * Wake the task if so.
2721  * This is required every time the blocked sigset_t changes.
2722  * callers must hold sighand->siglock.
2723  */
2724 extern void recalc_sigpending_and_wake(struct task_struct *t);
2725 extern void recalc_sigpending(void);
2726 
2727 extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2728 
2729 static inline void signal_wake_up(struct task_struct *t, bool resume)
2730 {
2731 	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2732 }
2733 static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2734 {
2735 	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2736 }
2737 
2738 /*
2739  * Wrappers for p->thread_info->cpu access. No-op on UP.
2740  */
2741 #ifdef CONFIG_SMP
2742 
2743 static inline unsigned int task_cpu(const struct task_struct *p)
2744 {
2745 	return task_thread_info(p)->cpu;
2746 }
2747 
2748 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2749 
2750 #else
2751 
2752 static inline unsigned int task_cpu(const struct task_struct *p)
2753 {
2754 	return 0;
2755 }
2756 
2757 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2758 {
2759 }
2760 
2761 #endif /* CONFIG_SMP */
2762 
2763 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2764 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2765 
2766 extern void normalize_rt_tasks(void);
2767 
2768 #ifdef CONFIG_CGROUP_SCHED
2769 
2770 extern struct task_group root_task_group;
2771 
2772 extern struct task_group *sched_create_group(struct task_group *parent);
2773 extern void sched_destroy_group(struct task_group *tg);
2774 extern void sched_move_task(struct task_struct *tsk);
2775 #ifdef CONFIG_FAIR_GROUP_SCHED
2776 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
2777 extern unsigned long sched_group_shares(struct task_group *tg);
2778 #endif
2779 #ifdef CONFIG_RT_GROUP_SCHED
2780 extern int sched_group_set_rt_runtime(struct task_group *tg,
2781 				      long rt_runtime_us);
2782 extern long sched_group_rt_runtime(struct task_group *tg);
2783 extern int sched_group_set_rt_period(struct task_group *tg,
2784 				      long rt_period_us);
2785 extern long sched_group_rt_period(struct task_group *tg);
2786 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
2787 #endif
2788 #endif /* CONFIG_CGROUP_SCHED */
2789 
2790 extern int task_can_switch_user(struct user_struct *up,
2791 					struct task_struct *tsk);
2792 
2793 #ifdef CONFIG_TASK_XACCT
2794 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2795 {
2796 	tsk->ioac.rchar += amt;
2797 }
2798 
2799 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2800 {
2801 	tsk->ioac.wchar += amt;
2802 }
2803 
2804 static inline void inc_syscr(struct task_struct *tsk)
2805 {
2806 	tsk->ioac.syscr++;
2807 }
2808 
2809 static inline void inc_syscw(struct task_struct *tsk)
2810 {
2811 	tsk->ioac.syscw++;
2812 }
2813 #else
2814 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2815 {
2816 }
2817 
2818 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2819 {
2820 }
2821 
2822 static inline void inc_syscr(struct task_struct *tsk)
2823 {
2824 }
2825 
2826 static inline void inc_syscw(struct task_struct *tsk)
2827 {
2828 }
2829 #endif
2830 
2831 #ifndef TASK_SIZE_OF
2832 #define TASK_SIZE_OF(tsk)	TASK_SIZE
2833 #endif
2834 
2835 #ifdef CONFIG_MM_OWNER
2836 extern void mm_update_next_owner(struct mm_struct *mm);
2837 extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2838 #else
2839 static inline void mm_update_next_owner(struct mm_struct *mm)
2840 {
2841 }
2842 
2843 static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2844 {
2845 }
2846 #endif /* CONFIG_MM_OWNER */
2847 
2848 static inline unsigned long task_rlimit(const struct task_struct *tsk,
2849 		unsigned int limit)
2850 {
2851 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
2852 }
2853 
2854 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
2855 		unsigned int limit)
2856 {
2857 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
2858 }
2859 
2860 static inline unsigned long rlimit(unsigned int limit)
2861 {
2862 	return task_rlimit(current, limit);
2863 }
2864 
2865 static inline unsigned long rlimit_max(unsigned int limit)
2866 {
2867 	return task_rlimit_max(current, limit);
2868 }
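
/*
 * Rlimit sketch (illustrative): check a request against the current
 * task's soft limit, e.g. before pinning more memory.  "new_len" is a
 * stand-in for the size being requested.
 *
 *	if (new_len > rlimit(RLIMIT_MEMLOCK))
 *		return -ENOMEM;
 */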
2869 
2870 #endif /* __KERNEL__ */
2871 
2872 #endif
2873