/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
#include <linux/seqlock.h>
#include <linux/android_kabi.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
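
/*
 * Worked example (illustrative; AT_VECTOR_SIZE_BASE is 20 in
 * <linux/auxvec.h> at the time of writing): with no arch-specific
 * entries, AT_VECTOR_SIZE = 2 * (0 + 20 + 1) = 42 unsigned longs.
 * The "+ 1" covers the terminating AT_NULL entry, and the "2 *"
 * accounts for each auxv entry being an (id, value) pair.
 */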

#define INIT_PASID	0

struct address_space;
struct mem_cgroup;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * If you allocate the page using alloc_pages(), you can use some of the
 * space in struct page for your own purposes.  The five words in the main
 * union are available, except for bit 0 of the first word which must be
 * kept clear.  Many users use this word to store a pointer to an object
 * which is guaranteed to be aligned.  If you use the same storage as
 * page->mapping, you must restore it to NULL before freeing the page.
 *
 * If your page will not be mapped to userspace, you can also use the four
 * bytes in the mapcount union, but you must call page_mapcount_reset()
 * before freeing it.
 *
 * If you want to use the refcount field, it must be used in such a way
 * that other CPUs temporarily incrementing and then decrementing the
 * refcount does not cause problems.  On receiving the page from
 * alloc_pages(), the refcount will be positive.
 *
 * If you allocate pages of order > 0, you can use some of the fields
 * in each subpage, but you may need to restore some of their values
 * afterwards.
 *
 * SLUB uses cmpxchg_double() to atomically update its freelist and
 * counters.  That requires that freelist & counters be adjacent and
 * double-word aligned.  We align all struct pages to double-word
 * boundaries, and ensure that 'freelist' is aligned within the
 * struct.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment	__aligned(2 * sizeof(unsigned long))
#else
#define _struct_page_alignment
#endif

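/*
 * Illustrative sketch (not part of this header): a driver that owns a
 * page from alloc_pages() may stash a pointer to an aligned object in
 * the page, e.g. via the private field, as long as it restores the
 * field before freeing:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *	if (page) {
 *		set_page_private(page, (unsigned long)my_obj);
 *		...
 *		set_page_private(page, 0);
 *		__free_pages(page, 0);
 *	}
 *
 * Here my_obj is a hypothetical driver-private structure.
 */
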
struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	/*
	 * Five words (20/40 bytes) are available in this union.
	 * WARNING: bit 0 of the first word is used for PageTail(). That
	 * means the other users of this union MUST NOT use the bit to
	 * avoid collision and false-positive PageTail().
	 */
	union {
		struct {	/* Page cache and anonymous pages */
			union {
				/**
				 * @lru: Pageout list, e.g. active_list protected by
				 * lruvec->lru_lock.  Sometimes used as a generic list
				 * by the page owner.
				 */
				struct list_head lru;

				/* Or, free page */
				struct list_head buddy_list;
				struct list_head pcp_list;
			};
			/* See page-flags.h for PAGE_MAPPING_FLAGS */
			struct address_space *mapping;
			pgoff_t index;		/* Our offset within mapping. */
			/**
			 * @private: Mapping-private opaque data.
			 * Usually used for buffer_heads if PagePrivate.
			 * Used for swp_entry_t if PageSwapCache.
			 * Indicates order in the buddy system if PageBuddy.
			 */
			unsigned long private;
		};
		struct {	/* page_pool used by netstack */
			/**
			 * @pp_magic: magic value to avoid recycling
			 * non-page_pool-allocated pages.
			 */
			unsigned long pp_magic;
			struct page_pool *pp;
			unsigned long _pp_mapping_pad;
			unsigned long dma_addr;
			union {
				/**
				 * dma_addr_upper: might require a 64-bit
				 * value on 32-bit architectures.
				 */
				unsigned long dma_addr_upper;
				/**
				 * For frag page support; not supported on
				 * 32-bit architectures with 64-bit DMA.
				 */
				atomic_long_t pp_frag_count;
			};
		};
		struct {	/* slab, slob and slub */
			union {
				struct list_head slab_list;
				struct {	/* Partial pages */
					struct page *next;
#ifdef CONFIG_64BIT
					int pages;	/* Nr of pages left */
					int pobjects;	/* Approximate count */
#else
					short int pages;
					short int pobjects;
#endif
				};
			};
			struct kmem_cache *slab_cache; /* not slob */
			/* Double-word boundary */
			void *freelist;		/* first free object */
			union {
				void *s_mem;	/* slab: first object */
				unsigned long counters;		/* SLUB */
				struct {			/* SLUB */
					unsigned inuse:16;
					unsigned objects:15;
					unsigned frozen:1;
				};
			};
		};
		struct {	/* Tail pages of compound page */
			unsigned long compound_head;	/* Bit zero is set */

			/* First tail page only */
			unsigned char compound_dtor;
			unsigned char compound_order;
			atomic_t compound_mapcount;
			unsigned int compound_nr; /* 1 << compound_order */
		};
		struct {	/* Second tail page of compound page */
			unsigned long _compound_pad_1;	/* compound_head */
			atomic_t hpage_pinned_refcount;
			/* For both global and memcg */
			struct list_head deferred_list;
		};
		struct {	/* Page table pages */
			unsigned long _pt_pad_1;	/* compound_head */
			pgtable_t pmd_huge_pte; /* protected by page->ptl */
			unsigned long _pt_pad_2;	/* mapping */
			union {
				struct mm_struct *pt_mm; /* x86 pgds only */
				atomic_t pt_frag_refcount; /* powerpc */
			};
#if ALLOC_SPLIT_PTLOCKS
			spinlock_t *ptl;
#else
			spinlock_t ptl;
#endif
		};
		struct {	/* ZONE_DEVICE pages */
			/** @pgmap: Points to the hosting device page map. */
			struct dev_pagemap *pgmap;
			void *zone_device_data;
			/*
			 * ZONE_DEVICE private pages are counted as being
			 * mapped so the next 3 words hold the mapping, index,
			 * and private fields from the source anonymous or
			 * page cache page while the page is migrated to device
			 * private memory.
			 * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
			 * use the mapping, index, and private fields when
			 * pmem backed DAX files are mapped.
			 */
		};

		/** @rcu_head: You can use this to free a page by RCU. */
		struct rcu_head rcu_head;
	};

	union {		/* This union is 4 bytes in size. */
		/*
		 * If the page can be mapped to userspace, encodes the number
		 * of times this page is referenced by a page table.
		 */
		atomic_t _mapcount;

		/*
		 * If the page is neither PageSlab nor mappable to userspace,
		 * the value stored here may help determine what this page
		 * is used for.  See page-flags.h for a list of page types
		 * which are currently stored here.
		 */
		unsigned int page_type;

		unsigned int active;		/* SLAB */
		int units;			/* SLOB */
	};

	/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
	atomic_t _refcount;

#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
} _struct_page_alignment;

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline atomic_t *compound_pincount_ptr(struct page *page)
{
	return &page[2].hpage_pinned_refcount;
}
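
/*
 * Illustrative sketch (not part of this header): these helpers hand
 * back counters stored in the first and second tail pages, so callers
 * pass the head page of a compound page. The mapcount of a THP can be
 * read roughly as:
 *
 *	int mapcount = atomic_read(compound_mapcount_ptr(head)) + 1;
 *
 * (the counter is biased by -1, matching page->_mapcount conventions).
 */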

/*
 * Used for sizing the vmemmap region on some architectures
 */
#define STRUCT_PAGE_MAX_SHIFT	(order_base_2(sizeof(struct page)))

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

#define page_private(page)		((page)->private)

static inline void set_page_private(struct page *page, unsigned long private)
{
	page->private = private;
}

struct page_frag_cache {
	void *va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;
	__u16 size;
#else
	__u32 offset;
#endif
	/* We maintain a pagecnt_bias so that we don't dirty the cache line
	 * containing page->_refcount every time we allocate a fragment.
	 */
	unsigned int		pagecnt_bias;
	bool pfmemalloc;
};
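
/*
 * Illustrative sketch (not part of this header): a page_frag_cache is
 * typically zero-initialised and then carved into small fragments with
 * page_frag_alloc(), each released with page_frag_free():
 *
 *	static struct page_frag_cache frag_cache;
 *
 *	void *buf = page_frag_alloc(&frag_cache, 256, GFP_ATOMIC);
 *	if (buf) {
 *		...
 *		page_frag_free(buf);
 *	}
 *
 * The pagecnt_bias trick means only fragment accounting, not
 * page->_refcount, is touched on each allocation.
 */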

typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx __rcu *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};
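
/*
 * Illustrative sketch (not part of this header): because @name is a
 * flexible array member, an anon_vma_name is allocated with room for
 * the string, roughly:
 *
 *	struct anon_vma_name *anon_name =
 *		kmalloc(struct_size(anon_name, name, strlen(str) + 1),
 *			GFP_KERNEL);
 *	if (anon_name) {
 *		kref_init(&anon_name->kref);
 *		strcpy(anon_name->name, str);
 *	}
 *
 * This mirrors how the kernel's VMA-naming code allocates one.
 */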

/*
 * This struct describes a virtual memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 *
 * Note that speculative page faults make an on-stack copy of the VMA,
 * so the structure size matters.
 * (TODO - it would be preferable to copy only the required vma attributes
 *  rather than the entire vma).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start, vm_end;

			/* linked list of VMAs per task, sorted by address */
			struct vm_area_struct *vm_next, *vm_prev;
		};
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
		struct rcu_head vm_rcu;	/* Used for deferred freeing. */
#endif
	};

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */

	/*
	 * Access permissions of this VMA.
	 * See vmf_insert_mixed_prot() for discussion.
	 */
	pgprot_t vm_page_prot;
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 *
	 * For private anonymous mappings, a pointer to a null-terminated
	 * string containing the name given to the vma, or NULL if unnamed.
	 */

	union {
		struct {
			struct rb_node rb;
			unsigned long rb_subtree_last;
		} shared;
		/*
		 * Serialized by mmap_sem. Never use directly because it is
		 * valid only when vm_file is NULL. Use anon_vma_name instead.
		 */
		struct anon_vma_name *anon_name;
	};

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	/*
	 * The name does not reflect the usage and is not renamed to keep
	 * the ABI intact.
	 * This is used to refcount VMA in get_vma/put_vma.
	 */
	atomic_t file_ref_count;
#endif

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
} __randomize_layout;

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

struct kioctx_table;
struct percpu_rw_semaphore;
struct mm_struct {
	struct {
		struct vm_area_struct *mmap;		/* list of VMAs */
		struct rb_root mm_rb;
		u64 vmacache_seqnum;                   /* per-thread vmacache */
#ifdef CONFIG_MMU
		unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
		unsigned long mmap_base;	/* base of mmap area */
		unsigned long mmap_legacy_base;	/* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
		/* Base addresses for compatible mmap() */
		unsigned long mmap_compat_base;
		unsigned long mmap_compat_legacy_base;
#endif
		unsigned long task_size;	/* size of task vm space */
		unsigned long highest_vm_end;	/* highest vma end address */
		pgd_t *pgd;

#ifdef CONFIG_MEMBARRIER
		/**
		 * @membarrier_state: Flags controlling membarrier behavior.
		 *
		 * This field is close to @pgd to hopefully fit in the same
		 * cache-line, which needs to be touched by switch_mm().
		 */
		atomic_t membarrier_state;
#endif

		/**
		 * @mm_users: The number of users including userspace.
		 *
		 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
		 * drops to 0 (i.e. when the task exits and there are no other
		 * temporary reference holders), we also release a reference on
		 * @mm_count (which may then free the &struct mm_struct if
		 * @mm_count also drops to 0).
		 */
		atomic_t mm_users;

		/**
		 * @mm_count: The number of references to &struct mm_struct
		 * (@mm_users count as 1).
		 *
		 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
		 * &struct mm_struct is freed.
		 */
		atomic_t mm_count;
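
		/*
		 * Illustrative sketch (not part of this header): code
		 * that needs the address space to remain usable takes
		 * an @mm_users reference, while code that only needs
		 * the mm_struct itself to stay allocated pins @mm_count:
		 *
		 *	if (mmget_not_zero(mm)) {
		 *		... page tables may be used here ...
		 *		mmput(mm);
		 *	}
		 *
		 *	mmgrab(mm);
		 *	... mm won't be freed, but may have exited ...
		 *	mmdrop(mm);
		 */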

#ifdef CONFIG_MMU
		atomic_long_t pgtables_bytes;	/* PTE page table pages */
#endif
		int map_count;			/* number of VMAs */

		spinlock_t page_table_lock; /* Protects page tables and some
					     * counters
					     */
		/*
		 * With some kernel configs, the current offset of mmap_lock
		 * inside 'mm_struct' is 0x120; this is near-optimal, as its
		 * two hot fields 'count' and 'owner' then sit in two
		 * different cachelines, and when mmap_lock is highly
		 * contended both fields are accessed frequently, so the
		 * current layout helps reduce cache bouncing.
		 *
		 * So please be careful with adding new fields before
		 * mmap_lock, which can easily push the 2 fields into one
		 * cacheline.
		 */
		struct rw_semaphore mmap_lock;
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
		unsigned long mmap_seq;
#endif

		struct list_head mmlist; /* List of maybe swapped mm's.	These
					  * are globally strung together off
					  * init_mm.mmlist, and are protected
					  * by mmlist_lock
					  */

		unsigned long hiwater_rss; /* High-watermark of RSS usage */
		unsigned long hiwater_vm;  /* High-water virtual memory usage */

		unsigned long total_vm;	   /* Total pages mapped */
		unsigned long locked_vm;   /* Pages that have PG_mlocked set */
		atomic64_t    pinned_vm;   /* Refcount permanently increased */
		unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
		unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
		unsigned long stack_vm;	   /* VM_STACK */
		unsigned long def_flags;

		/**
		 * @write_protect_seq: Locked when any thread is write
		 * protecting pages mapped by this mm to enforce a later COW,
		 * for instance during page table copying for fork().
		 */
		seqcount_t write_protect_seq;

		spinlock_t arg_lock; /* protect the below fields */

		unsigned long start_code, end_code, start_data, end_data;
		unsigned long start_brk, brk, start_stack;
		unsigned long arg_start, arg_end, env_start, env_end;

		unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

		/*
		 * Special counters, in some configurations protected by the
		 * page_table_lock, in other configurations by being atomic.
		 */
		struct mm_rss_stat rss_stat;

		struct linux_binfmt *binfmt;

		/* Architecture-specific MM context */
		mm_context_t context;

		unsigned long flags; /* Must use atomic bitops to access */

		struct core_state *core_state; /* coredumping support */

#ifdef CONFIG_AIO
		spinlock_t			ioctx_lock;
		struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MEMCG
		/*
		 * "owner" points to a task that is regarded as the canonical
		 * user/owner of this mm. All of the following must be true in
		 * order for it to be changed:
		 *
		 * current == mm->owner
		 * current->mm != mm
		 * new_owner->mm == mm
		 * new_owner->alloc_lock is held
		 */
		struct task_struct __rcu *owner;
#endif
		struct user_namespace *user_ns;

		/* store ref to file /proc/<pid>/exe symlink points to */
		struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
		struct mmu_notifier_subscriptions *notifier_subscriptions;
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
		struct percpu_rw_semaphore *mmu_notifier_lock;
#endif	/* CONFIG_SPECULATIVE_PAGE_FAULT */
#endif	/* CONFIG_MMU_NOTIFIER */
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
		pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
		/*
		 * numa_next_scan is the next time that the PTEs will be marked
		 * pte_numa. NUMA hinting faults will gather statistics and
		 * migrate pages to new nodes if necessary.
		 */
		unsigned long numa_next_scan;

		/* Restart point for scanning and setting pte_numa */
		unsigned long numa_scan_offset;

		/* numa_scan_seq prevents two threads setting pte_numa */
		int numa_scan_seq;
#endif
		/*
		 * An operation with batched TLB flushing is going on. Anything
		 * that can move process memory needs to flush the TLB when
		 * moving a PROT_NONE or PROT_NUMA mapped page.
		 */
		atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
		/* See flush_tlb_batched_pending() */
		bool tlb_flush_batched;
#endif
		struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
		atomic_long_t hugetlb_usage;
#endif
		struct work_struct async_put_work;

#ifdef CONFIG_IOMMU_SUPPORT
		u32 pasid;
#endif
#ifdef CONFIG_LRU_GEN
		struct {
			/* this mm_struct is on lru_gen_mm_list */
			struct list_head list;
			/*
			 * Set when switching to this mm_struct, as a hint of
			 * whether it has been used since the last time per-node
			 * page table walkers cleared the corresponding bits.
			 */
			unsigned long bitmap;
#ifdef CONFIG_MEMCG
			/* points to the memcg of "owner" above */
			struct mem_cgroup *memcg;
#endif
		} lru_gen;
#endif /* CONFIG_LRU_GEN */

		ANDROID_KABI_RESERVE(1);
	} __randomize_layout;

	/*
	 * The mm_cpumask needs to be at the end of mm_struct, because it
	 * is dynamically sized based on nr_cpu_ids.
	 */
	unsigned long cpu_bitmap[];
};

extern struct mm_struct init_mm;

/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
	unsigned long cpu_bitmap = (unsigned long)mm;

	cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
	cpumask_clear((struct cpumask *)cpu_bitmap);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return (struct cpumask *)&mm->cpu_bitmap;
}
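
/*
 * Illustrative sketch (not part of this header): architectures track
 * which CPUs have run an mm via this mask, e.g.:
 *
 *	cpumask_set_cpu(cpu, mm_cpumask(next_mm));
 *	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
 *		... this CPU may hold stale TLB entries for mm ...
 */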

#ifdef CONFIG_LRU_GEN

struct lru_gen_mm_list {
	/* mm_struct list for page table walkers */
	struct list_head fifo;
	/* protects the list above */
	spinlock_t lock;
};

void lru_gen_add_mm(struct mm_struct *mm);
void lru_gen_del_mm(struct mm_struct *mm);
#ifdef CONFIG_MEMCG
void lru_gen_migrate_mm(struct mm_struct *mm);
#endif

static inline void lru_gen_init_mm(struct mm_struct *mm)
{
	INIT_LIST_HEAD(&mm->lru_gen.list);
	mm->lru_gen.bitmap = 0;
#ifdef CONFIG_MEMCG
	mm->lru_gen.memcg = NULL;
#endif
}

static inline void lru_gen_use_mm(struct mm_struct *mm)
{
	/*
	 * When the bitmap is set, page reclaim knows this mm_struct has been
	 * used since the last time it cleared the bitmap. So it might be worth
	 * walking the page tables of this mm_struct to clear the accessed bit.
	 */
	WRITE_ONCE(mm->lru_gen.bitmap, -1);
}

#else /* !CONFIG_LRU_GEN */

static inline void lru_gen_add_mm(struct mm_struct *mm)
{
}

static inline void lru_gen_del_mm(struct mm_struct *mm)
{
}

#ifdef CONFIG_MEMCG
static inline void lru_gen_migrate_mm(struct mm_struct *mm)
{
}
#endif

static inline void lru_gen_init_mm(struct mm_struct *mm)
{
}

static inline void lru_gen_use_mm(struct mm_struct *mm)
{
}

#endif /* CONFIG_LRU_GEN */

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_finish_mmu(struct mmu_gather *tlb);

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}
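
/*
 * Illustrative sketch (not part of this header): callers that batch
 * PTE changes bracket them with the pending counter, roughly:
 *
 *	inc_tlb_flush_pending(mm);
 *	... change PTEs under the page table lock ...
 *	flush_tlb_range(vma, start, end);
 *	dec_tlb_flush_pending(mm);
 *
 * mprotect-style code follows this pattern; see the ordering comment
 * in inc_tlb_flush_pending() above.
 */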

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}

struct vm_fault;

/**
 * typedef vm_fault_t - Return type for page fault handlers.
 *
 * Page fault handlers return a bitmask of %VM_FAULT values.
 */
typedef __bitwise unsigned int vm_fault_t;

/**
 * enum vm_fault_reason - Page fault handlers return a bitmask of
 * these values to tell the core VM what happened when handling the
 * fault. Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 *
 * @VM_FAULT_OOM:		Out Of Memory
 * @VM_FAULT_SIGBUS:		Bad access
 * @VM_FAULT_MAJOR:		Page read from storage
 * @VM_FAULT_WRITE:		Special case for get_user_pages
 * @VM_FAULT_HWPOISON:		Hit poisoned small page
 * @VM_FAULT_HWPOISON_LARGE:	Hit poisoned large page. Index encoded
 *				in upper bits
 * @VM_FAULT_SIGSEGV:		segmentation fault
 * @VM_FAULT_NOPAGE:		->fault installed the pte; no page is returned
 * @VM_FAULT_LOCKED:		->fault locked the returned page
 * @VM_FAULT_RETRY:		->fault blocked, must retry
 * @VM_FAULT_FALLBACK:		huge page fault failed, fall back to small
 * @VM_FAULT_DONE_COW:		->fault has fully handled COW
 * @VM_FAULT_NEEDDSYNC:		->fault did not modify page tables and needs
 *				fsync() to complete (for synchronous page faults
 *				in DAX)
 * @VM_FAULT_HINDEX_MASK:	mask HINDEX value
 */
enum vm_fault_reason {
	VM_FAULT_OOM            = (__force vm_fault_t)0x000001,
	VM_FAULT_SIGBUS         = (__force vm_fault_t)0x000002,
	VM_FAULT_MAJOR          = (__force vm_fault_t)0x000004,
	VM_FAULT_WRITE          = (__force vm_fault_t)0x000008,
	VM_FAULT_HWPOISON       = (__force vm_fault_t)0x000010,
	VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
	VM_FAULT_SIGSEGV        = (__force vm_fault_t)0x000040,
	VM_FAULT_NOPAGE         = (__force vm_fault_t)0x000100,
	VM_FAULT_LOCKED         = (__force vm_fault_t)0x000200,
	VM_FAULT_RETRY          = (__force vm_fault_t)0x000400,
	VM_FAULT_FALLBACK       = (__force vm_fault_t)0x000800,
	VM_FAULT_DONE_COW       = (__force vm_fault_t)0x001000,
	VM_FAULT_NEEDDSYNC      = (__force vm_fault_t)0x002000,
	VM_FAULT_HINDEX_MASK    = (__force vm_fault_t)0x0f0000,
};
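
/*
 * Illustrative sketch (not part of this header): a hypothetical
 * vm_operations_struct fault handler combines these bits, e.g.:
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = my_lookup_page(vmf->pgoff);
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		get_page(page);	(handlers return a referenced page)
 *		lock_page(page);
 *		vmf->page = page;
 *		return VM_FAULT_LOCKED;
 *	}
 *
 * my_lookup_page() is a made-up helper; the core VM interprets the
 * returned mask as documented above.
 */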

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)

#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS |	\
			VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON |	\
			VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)

#define VM_FAULT_RESULT_TRACE \
	{ VM_FAULT_OOM,                 "OOM" },	\
	{ VM_FAULT_SIGBUS,              "SIGBUS" },	\
	{ VM_FAULT_MAJOR,               "MAJOR" },	\
	{ VM_FAULT_WRITE,               "WRITE" },	\
	{ VM_FAULT_HWPOISON,            "HWPOISON" },	\
	{ VM_FAULT_HWPOISON_LARGE,      "HWPOISON_LARGE" },	\
	{ VM_FAULT_SIGSEGV,             "SIGSEGV" },	\
	{ VM_FAULT_NOPAGE,              "NOPAGE" },	\
	{ VM_FAULT_LOCKED,              "LOCKED" },	\
	{ VM_FAULT_RETRY,               "RETRY" },	\
	{ VM_FAULT_FALLBACK,            "FALLBACK" },	\
	{ VM_FAULT_DONE_COW,            "DONE_COW" },	\
	{ VM_FAULT_NEEDDSYNC,           "NEEDDSYNC" }

struct vm_special_mapping {
	const char *name;	/* The name, e.g. "[vdso]". */

	/*
	 * If .fault is not provided, this points to a
	 * NULL-terminated array of pages that back the special mapping.
	 *
	 * This must not be NULL unless .fault is provided.
	 */
	struct page **pages;

	/*
	 * If non-NULL, then this is called to resolve page faults
	 * on the special mapping.  If used, .pages is not checked.
	 */
	vm_fault_t (*fault)(const struct vm_special_mapping *sm,
				struct vm_area_struct *vma,
				struct vm_fault *vmf);

	int (*mremap)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *new_vma);
};
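
/*
 * Illustrative sketch (not part of this header): architectures install
 * things like the vDSO with a static instance of this struct, roughly:
 *
 *	static struct page *vdso_pages[] = { ..., NULL };
 *
 *	static const struct vm_special_mapping vdso_mapping = {
 *		.name	= "[vdso]",
 *		.pages	= vdso_pages,
 *	};
 *
 * and then map it with _install_special_mapping() at mmap time.
 */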

enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};

/*
 * A swap entry has to fit into an "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;

#endif /* _LINUX_MM_TYPES_H */