/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_TASK_H
#define _LINUX_MM_TYPES_TASK_H

/*
 * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
 *
 * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
 */

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

#include <asm/page.h>

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
#include <asm/tlbbatch.h>
#endif

#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
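
/*
 * Editorial note on the macros above (not from the original source):
 * split PTE/PMD page table locks only pay for their footprint on larger
 * SMP configurations, hence the NR_CPUS threshold.  When the spinlock no
 * longer fits in the space available inside struct page (e.g. lock
 * debugging options grow SPINLOCK_SIZE past sizeof(long)),
 * ALLOC_SPLIT_PTLOCKS selects a separately allocated lock instead of an
 * embedded one; see ptlock_alloc() in mm/memory.c.
 */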

/*
 * The per-task VMA cache array:
 */
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)

struct vmacache {
	u64 seqnum;
	struct vm_area_struct *vmas[VMACACHE_SIZE];
};
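
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * how a lookup indexes the cache.  The shift granularity below is
 * illustrative; the kernel's real hash is VMACACHE_HASH() in
 * mm/vmacache.c, and a real lookup must also validate seqnum against
 * the owning mm and check that the cached VMA actually spans @addr.
 */
static inline struct vm_area_struct *
vmacache_example_slot(struct vmacache *cache, unsigned long addr)
{
	/* Hash the page number of @addr into one of the VMACACHE_SIZE slots. */
	unsigned int idx = (addr >> PAGE_SHIFT) & VMACACHE_MASK;

	return cache->vmas[idx];
}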

/*
 * When updating this, please also update struct resident_page_types[] in
 * kernel/fork.c
 */
enum {
	MM_FILEPAGES,	/* Resident file mapping pages */
	MM_ANONPAGES,	/* Resident anonymous pages */
	MM_SWAPENTS,	/* Anonymous swap entries */
	MM_SHMEMPAGES,	/* Resident shared memory pages */
	NR_MM_COUNTERS
};

#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information */
struct task_rss_stat {
	int events;	/* for synchronization threshold */
	int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTE_PTLOCKS */
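
/*
 * Illustrative sketch (hypothetical helper, not the kernel API): how
 * split RSS counting batches updates in the per-thread structure above.
 * The real logic lives in mm/memory.c (add_mm_counter_fast() and
 * check_sync_rss_stat()), which folds the deltas into mm->rss_stat once
 * events crosses TASK_RSS_EVENTS_THRESH.
 */
#ifdef SPLIT_RSS_COUNTING
static inline void task_rss_stat_example_add(struct task_rss_stat *rss,
					     int member, int val)
{
	rss->count[member] += val;	/* lock-free: current thread only */
	rss->events++;			/* folded into mm->rss_stat later */
}
#endif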

struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
};
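
/*
 * Illustrative sketch (hypothetical helper): reading one of the
 * NR_MM_COUNTERS values.  The kernel's real accessor is
 * get_mm_counter() in <linux/mm.h>; the clamp matters because, with
 * SPLIT_RSS_COUNTING, a counter can be transiently negative until the
 * per-thread deltas are folded back in.
 */
static inline unsigned long mm_rss_stat_example_read(struct mm_rss_stat *stat,
						     int member)
{
	long val = atomic_long_read(&stat->count[member]);

	return val > 0 ? (unsigned long)val : 0;
}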

struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};
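
/*
 * Illustrative sketch (hypothetical helper): carving a sub-allocation
 * out of a page_frag.  Real refill and allocation logic lives in
 * net/core/sock.c (sk_page_frag_refill()) and mm/page_alloc.c; this
 * only shows how offset and size track the consumed and total bytes of
 * the cached page.
 */
static inline bool page_frag_example_reserve(struct page_frag *pfrag,
					     unsigned int len,
					     unsigned int *off)
{
	if (!pfrag->page || pfrag->offset + len > pfrag->size)
		return false;	/* no room left: caller must refill */

	*off = pfrag->offset;	/* the sub-allocation starts here */
	pfrag->offset += len;
	return true;
}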

/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/*
	 * The arch code makes the following promise: generic code can modify a
	 * PTE, then call arch_tlbbatch_add_mm() (which internally provides all
	 * needed barriers), then call arch_tlbbatch_flush(), and the entries
	 * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
	 * returns.
	 */
	struct arch_tlbflush_unmap_batch arch;

	/* True if a flush is needed. */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated, or a stale TLB entry could allow
	 * a write to the page without redirtying it.
	 */
	bool writable;
#endif
};
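
/*
 * Illustrative sketch of the contract described above, modelled on
 * set_tlb_ubc_flush_pending() and try_to_unmap_flush() in mm/rmap.c.
 * The arch_tlbbatch_*() declarations come from <asm/tlbflush.h>, which
 * this header does not include, so this stays in a comment:
 *
 *	// after clearing a PTE belonging to @mm:
 *	arch_tlbbatch_add_mm(&ubc->arch, mm);
 *	ubc->flush_required = true;
 *	if (pte_dirty)
 *		ubc->writable = true;
 *
 *	// later, before IO or page freeing proceeds:
 *	if (ubc->flush_required)
 *		arch_tlbbatch_flush(&ubc->arch);
 */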

#endif /* _LINUX_MM_TYPES_TASK_H */