1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_MM_TYPES_H
3 #define _LINUX_MM_TYPES_H
4 
5 #include <linux/mm_types_task.h>
6 
7 #include <linux/auxvec.h>
8 #include <linux/kref.h>
9 #include <linux/list.h>
10 #include <linux/spinlock.h>
11 #include <linux/rbtree.h>
12 #include <linux/maple_tree.h>
13 #include <linux/rwsem.h>
14 #include <linux/completion.h>
15 #include <linux/cpumask.h>
16 #include <linux/uprobes.h>
17 #include <linux/rcupdate.h>
18 #include <linux/page-flags-layout.h>
19 #include <linux/workqueue.h>
20 #include <linux/seqlock.h>
21 #include <linux/android_kabi.h>
22 
23 #include <asm/mmu.h>
24 
25 #ifndef AT_VECTOR_SIZE_ARCH
26 #define AT_VECTOR_SIZE_ARCH 0
27 #endif
28 #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
29 
30 #define INIT_PASID	0
31 
32 struct address_space;
33 struct mem_cgroup;
34 
35 /*
36  * Each physical page in the system has a struct page associated with
37  * it to keep track of whatever it is we are using the page for at the
38  * moment. Note that we have no way to track which tasks are using
39  * a page, though if it is a pagecache page, rmap structures can tell us
40  * who is mapping it.
41  *
42  * If you allocate the page using alloc_pages(), you can use some of the
43  * space in struct page for your own purposes.  The five words in the main
44  * union are available, except for bit 0 of the first word which must be
45  * kept clear.  Many users use this word to store a pointer to an object
46  * which is guaranteed to be aligned.  If you use the same storage as
47  * page->mapping, you must restore it to NULL before freeing the page.
48  *
49  * If your page will not be mapped to userspace, you can also use the four
50  * bytes in the mapcount union, but you must call page_mapcount_reset()
51  * before freeing it.
52  *
53  * If you want to use the refcount field, it must be used in such a way
54  * that other CPUs temporarily incrementing and then decrementing the
55  * refcount does not cause problems.  On receiving the page from
56  * alloc_pages(), the refcount will be positive.
57  *
58  * If you allocate pages of order > 0, you can use some of the fields
59  * in each subpage, but you may need to restore some of their values
60  * afterwards.
61  *
62  * SLUB uses cmpxchg_double() to atomically update its freelist and counters.
63  * That requires that freelist & counters in struct slab be adjacent and
64  * double-word aligned. Because struct slab currently just reinterprets the
65  * bits of struct page, we align all struct pages to double-word boundaries,
66  * and ensure that 'freelist' is aligned within struct slab.
67  */
68 #ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
69 #define _struct_page_alignment	__aligned(2 * sizeof(unsigned long))
70 #else
71 #define _struct_page_alignment
72 #endif
73 
74 struct page {
75 	unsigned long flags;		/* Atomic flags, some possibly
76 					 * updated asynchronously */
77 	/*
78 	 * Five words (20/40 bytes) are available in this union.
79 	 * WARNING: bit 0 of the first word is used for PageTail(). That
80 	 * means the other users of this union MUST NOT use the bit to
81 	 * avoid collision and false-positive PageTail().
82 	 */
83 	union {
84 		struct {	/* Page cache and anonymous pages */
85 			/**
86 			 * @lru: Pageout list, eg. active_list protected by
87 			 * lruvec->lru_lock.  Sometimes used as a generic list
88 			 * by the page owner.
89 			 */
90 			union {
91 				struct list_head lru;
92 
93 				/* Or, for the Unevictable "LRU list" slot */
94 				struct {
95 					/* Always even, to negate PageTail */
96 					void *__filler;
97 					/* Count page's or folio's mlocks */
98 					unsigned int mlock_count;
99 				};
100 
101 				/* Or, free page */
102 				struct list_head buddy_list;
103 				struct list_head pcp_list;
104 			};
105 			/* See page-flags.h for PAGE_MAPPING_FLAGS */
106 			struct address_space *mapping;
107 			pgoff_t index;		/* Our offset within mapping. */
108 			/**
109 			 * @private: Mapping-private opaque data.
110 			 * Usually used for buffer_heads if PagePrivate.
111 			 * Used for swp_entry_t if PageSwapCache.
112 			 * Indicates order in the buddy system if PageBuddy.
113 			 */
114 			unsigned long private;
115 		};
116 		struct {	/* page_pool used by netstack */
117 			/**
118 			 * @pp_magic: magic value to avoid recycling non
119 			 * page_pool allocated pages.
120 			 */
121 			unsigned long pp_magic;
122 			struct page_pool *pp;
123 			unsigned long _pp_mapping_pad;
124 			unsigned long dma_addr;
125 			union {
126 				/**
127 				 * dma_addr_upper: might require a 64-bit
128 				 * value on 32-bit architectures.
129 				 */
130 				unsigned long dma_addr_upper;
131 				/**
132 				 * For frag page support, not supported on
133 				 * 32-bit architectures with 64-bit DMA.
134 				 */
135 				atomic_long_t pp_frag_count;
136 			};
137 		};
138 		struct {	/* Tail pages of compound page */
139 			unsigned long compound_head;	/* Bit zero is set */
140 
141 			/* First tail page only */
142 			unsigned char compound_dtor;
143 			unsigned char compound_order;
144 			atomic_t compound_mapcount;
145 			atomic_t compound_pincount;
146 #ifdef CONFIG_64BIT
147 			unsigned int compound_nr; /* 1 << compound_order */
148 #endif
149 		};
150 		struct {	/* Second tail page of compound page */
151 			unsigned long _compound_pad_1;	/* compound_head */
152 			unsigned long _compound_pad_2;
153 			/* For both global and memcg */
154 			struct list_head deferred_list;
155 		};
156 		struct {	/* Page table pages */
157 			unsigned long _pt_pad_1;	/* compound_head */
158 			pgtable_t pmd_huge_pte; /* protected by page->ptl */
159 			unsigned long _pt_pad_2;	/* mapping */
160 			union {
161 				struct mm_struct *pt_mm; /* x86 pgds only */
162 				atomic_t pt_frag_refcount; /* powerpc */
163 			};
164 #if ALLOC_SPLIT_PTLOCKS
165 			spinlock_t *ptl;
166 #else
167 			spinlock_t ptl;
168 #endif
169 		};
170 		struct {	/* ZONE_DEVICE pages */
171 			/** @pgmap: Points to the hosting device page map. */
172 			struct dev_pagemap *pgmap;
173 			void *zone_device_data;
174 			/*
175 			 * ZONE_DEVICE private pages are counted as being
176 			 * mapped so the next 3 words hold the mapping, index,
177 			 * and private fields from the source anonymous or
178 			 * page cache page while the page is migrated to device
179 			 * private memory.
180 			 * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
181 			 * use the mapping, index, and private fields when
182 			 * pmem backed DAX files are mapped.
183 			 */
184 		};
185 
186 		/** @rcu_head: You can use this to free a page by RCU. */
187 		struct rcu_head rcu_head;
188 	};
189 
190 	union {		/* This union is 4 bytes in size. */
191 		/*
192 		 * If the page can be mapped to userspace, encodes the number
193 		 * of times this page is referenced by a page table.
194 		 */
195 		atomic_t _mapcount;
196 
197 		/*
198 		 * If the page is neither PageSlab nor mappable to userspace,
199 		 * the value stored here may help determine what this page
200 		 * is used for.  See page-flags.h for a list of page types
201 		 * which are currently stored here.
202 		 */
203 		unsigned int page_type;
204 	};
205 
206 	/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
207 	atomic_t _refcount;
208 
209 #ifdef CONFIG_MEMCG
210 	unsigned long memcg_data;
211 #endif
212 
213 	/*
214 	 * On machines where all RAM is mapped into kernel address space,
215 	 * we can simply calculate the virtual address. On machines with
216 	 * highmem some memory is mapped into kernel virtual memory
217 	 * dynamically, so we need a place to store that address.
218 	 * Note that this field could be 16 bits on x86 ... ;)
219 	 *
220 	 * Architectures with slow multiplication can define
221 	 * WANT_PAGE_VIRTUAL in asm/page.h
222 	 */
223 #if defined(WANT_PAGE_VIRTUAL)
224 	void *virtual;			/* Kernel virtual address (NULL if
225 					   not kmapped, ie. highmem) */
226 #endif /* WANT_PAGE_VIRTUAL */
227 
228 #ifdef CONFIG_KMSAN
229 	/*
230 	 * KMSAN metadata for this page:
231 	 *  - shadow page: every bit indicates whether the corresponding
232 	 *    bit of the original page is initialized (0) or not (1);
233 	 *  - origin page: every 4 bytes contain an id of the stack trace
234 	 *    where the uninitialized value was created.
235 	 */
236 	struct page *kmsan_shadow;
237 	struct page *kmsan_origin;
238 #endif
239 
240 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
241 	int _last_cpupid;
242 #endif
243 } _struct_page_alignment;
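
/*
 * Illustrative sketch, not part of this header: one way an alloc_pages()
 * user might follow the rules documented above -- stash an aligned object
 * pointer in the word shared with page->mapping (bit 0 stays clear because
 * the object is aligned) and restore it to NULL before freeing.  The
 * struct my_object type and both helpers are hypothetical; alloc_pages()
 * and put_page() come from linux/gfp.h and linux/mm.h.
 */
static inline struct page *example_alloc_tagged(struct my_object *obj)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);

	if (page)
		page->mapping = (struct address_space *)obj;
	return page;
}

static inline void example_free_tagged(struct page *page)
{
	page->mapping = NULL;	/* must be restored before the page is freed */
	put_page(page);
}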
244 
245 /**
246  * struct folio - Represents a contiguous set of bytes.
247  * @flags: Identical to the page flags.
248  * @lru: Least Recently Used list; tracks how recently this folio was used.
249  * @mlock_count: Number of times this folio has been pinned by mlock().
250  * @mapping: The file this page belongs to, or refers to the anon_vma for
251  *    anonymous memory.
252  * @index: Offset within the file, in units of pages.  For anonymous memory,
253  *    this is the index from the beginning of the mmap.
254  * @private: Filesystem per-folio data (see folio_attach_private()).
255  *    Used for swp_entry_t if folio_test_swapcache().
256  * @_mapcount: Do not access this member directly.  Use folio_mapcount() to
257  *    find out how many times this folio is mapped by userspace.
258  * @_refcount: Do not access this member directly.  Use folio_ref_count()
259  *    to find how many references there are to this folio.
260  * @memcg_data: Memory Control Group data.
261  * @_flags_1: For large folios, additional page flags.
262  * @__head: Points to the folio.  Do not use.
263  * @_folio_dtor: Which destructor to use for this folio.
264  * @_folio_order: Do not use directly, call folio_order().
265  * @_total_mapcount: Do not use directly, call folio_entire_mapcount().
266  * @_pincount: Do not use directly, call folio_maybe_dma_pinned().
267  * @_folio_nr_pages: Do not use directly, call folio_nr_pages().
268  *
269  * A folio is a physically, virtually and logically contiguous set
270  * of bytes.  It is a power-of-two in size, and it is aligned to that
271  * same power-of-two.  It is at least as large as %PAGE_SIZE.  If it is
272  * in the page cache, it is at a file offset which is a multiple of that
273  * power-of-two.  It may be mapped into userspace at an address which is
274  * at an arbitrary page offset, but its kernel virtual address is aligned
275  * to its size.
276  */
277 struct folio {
278 	/* private: don't document the anon union */
279 	union {
280 		struct {
281 	/* public: */
282 			unsigned long flags;
283 			union {
284 				struct list_head lru;
285 	/* private: avoid cluttering the output */
286 				struct {
287 					void *__filler;
288 	/* public: */
289 					unsigned int mlock_count;
290 	/* private: */
291 				};
292 	/* public: */
293 			};
294 			struct address_space *mapping;
295 			pgoff_t index;
296 			void *private;
297 			atomic_t _mapcount;
298 			atomic_t _refcount;
299 #ifdef CONFIG_MEMCG
300 			unsigned long memcg_data;
301 #endif
302 	/* private: the union with struct page is transitional */
303 		};
304 		struct page page;
305 	};
306 	unsigned long _flags_1;
307 	unsigned long __head;
308 	unsigned char _folio_dtor;
309 	unsigned char _folio_order;
310 	atomic_t _total_mapcount;
311 	atomic_t _pincount;
312 #ifdef CONFIG_64BIT
313 	unsigned int _folio_nr_pages;
314 #endif
315 };
316 
317 #define FOLIO_MATCH(pg, fl)						\
318 	static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
319 FOLIO_MATCH(flags, flags);
320 FOLIO_MATCH(lru, lru);
321 FOLIO_MATCH(mapping, mapping);
322 FOLIO_MATCH(compound_head, lru);
323 FOLIO_MATCH(index, index);
324 FOLIO_MATCH(private, private);
325 FOLIO_MATCH(_mapcount, _mapcount);
326 FOLIO_MATCH(_refcount, _refcount);
327 #ifdef CONFIG_MEMCG
328 FOLIO_MATCH(memcg_data, memcg_data);
329 #endif
330 #undef FOLIO_MATCH
331 #define FOLIO_MATCH(pg, fl)						\
332 	static_assert(offsetof(struct folio, fl) ==			\
333 			offsetof(struct page, pg) + sizeof(struct page))
334 FOLIO_MATCH(flags, _flags_1);
335 FOLIO_MATCH(compound_head, __head);
336 FOLIO_MATCH(compound_dtor, _folio_dtor);
337 FOLIO_MATCH(compound_order, _folio_order);
338 FOLIO_MATCH(compound_mapcount, _total_mapcount);
339 FOLIO_MATCH(compound_pincount, _pincount);
340 #ifdef CONFIG_64BIT
341 FOLIO_MATCH(compound_nr, _folio_nr_pages);
342 #endif
343 #undef FOLIO_MATCH
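
/*
 * Sketch, not part of this header: the asserts above are what make the
 * transitional page<->folio cast legal, e.g. in a hypothetical helper like
 * this one.  Real code should use page_folio() from linux/page-flags.h,
 * which also resolves tail pages to their head page.
 */
static inline struct folio *example_page_to_folio(struct page *page)
{
	/* Only valid for head pages; field offsets are verified above. */
	return (struct folio *)page;
}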
344 
345 static inline atomic_t *folio_mapcount_ptr(struct folio *folio)
346 {
347 	struct page *tail = &folio->page + 1;
348 	return &tail->compound_mapcount;
349 }
350 
351 static inline atomic_t *compound_mapcount_ptr(struct page *page)
352 {
353 	return &page[1].compound_mapcount;
354 }
355 
356 static inline atomic_t *compound_pincount_ptr(struct page *page)
357 {
358 	return &page[1].compound_pincount;
359 }
360 
361 /*
362  * Used for sizing the vmemmap region on some architectures
363  */
364 #define STRUCT_PAGE_MAX_SHIFT	(order_base_2(sizeof(struct page)))
365 
366 #define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
367 #define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
368 
369 /*
370  * page_private can be used on tail pages.  However, PagePrivate is only
371  * checked by the VM on the head page.  So page_private on the tail pages
372  * should be used for data that's ancillary to the head page (eg attaching
373  * buffer heads to tail pages after attaching buffer heads to the head page)
374  */
375 #define page_private(page)		((page)->private)
376 
377 static inline void set_page_private(struct page *page, unsigned long private)
378 {
379 	page->private = private;
380 }
381 
382 static inline void *folio_get_private(struct folio *folio)
383 {
384 	return folio->private;
385 }
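
/*
 * Usage sketch under stated assumptions: a filesystem-style pairing of the
 * private field with the Private page flag.  SetPagePrivate() and get_page()
 * live in linux/page-flags.h and linux/mm.h; modern code would usually go
 * through folio_attach_private() instead.
 */
static inline void example_attach_private(struct page *page, unsigned long data)
{
	get_page(page);			/* the private data pins the page */
	set_page_private(page, data);
	SetPagePrivate(page);
}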
386 
387 struct page_frag_cache {
388 	void * va;
389 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
390 	__u16 offset;
391 	__u16 size;
392 #else
393 	__u32 offset;
394 #endif
395 	/* we maintain a pagecount bias, so that we don't dirty the cache line
396 	 * containing page->_refcount every time we allocate a fragment.
397 	 */
398 	unsigned int		pagecnt_bias;
399 	bool pfmemalloc;
400 };
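
/*
 * Usage sketch (the allocator is declared in linux/gfp.h, not here): the
 * cache hands out sub-page fragments while pagecnt_bias keeps it from
 * touching page->_refcount on every allocation.
 */
static inline void *example_frag_alloc(struct page_frag_cache *nc,
				       unsigned int len)
{
	void *buf = page_frag_alloc(nc, len, GFP_ATOMIC);

	/* buf, if non-NULL, is later released with page_frag_free(buf) */
	return buf;
}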
401 
402 typedef unsigned long vm_flags_t;
403 
404 /*
405  * A region containing a mapping of a non-memory backed file under NOMMU
406  * conditions.  These are held in a global tree and are pinned by the VMAs that
407  * map parts of them.
408  */
409 struct vm_region {
410 	struct rb_node	vm_rb;		/* link in global region tree */
411 	vm_flags_t	vm_flags;	/* VMA vm_flags */
412 	unsigned long	vm_start;	/* start address of region */
413 	unsigned long	vm_end;		/* region initialised to here */
414 	unsigned long	vm_top;		/* region allocated to here */
415 	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
416 	struct file	*vm_file;	/* the backing file or NULL */
417 
418 	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
419 	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
420 						* this region */
421 };
422 
423 #ifdef CONFIG_USERFAULTFD
424 #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
425 struct vm_userfaultfd_ctx {
426 	struct userfaultfd_ctx *ctx;
427 };
428 #else /* CONFIG_USERFAULTFD */
429 #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
430 struct vm_userfaultfd_ctx {};
431 #endif /* CONFIG_USERFAULTFD */
432 
433 struct anon_vma_name {
434 	struct kref kref;
435 	/* The name needs to be at the end because it is dynamically sized. */
436 	char name[];
437 };
438 
439 struct vma_lock {
440 	struct rw_semaphore lock;
441 };
442 
443 /*
444  * This struct describes a virtual memory area. There is one of these
445  * per VM-area/task. A VM area is any part of the process virtual memory
446  * space that has a special rule for the page-fault handlers (ie a shared
447  * library, the executable area etc).
448  */
449 struct vm_area_struct {
450 	/* The first cache line has the info for VMA tree walking. */
451 
452 	union {
453 		struct {
454 			/* VMA covers [vm_start; vm_end) addresses within mm */
455 			unsigned long vm_start;
456 			unsigned long vm_end;
457 		};
458 #ifdef CONFIG_PER_VMA_LOCK
459 		struct rcu_head vm_rcu;	/* Used for deferred freeing. */
460 #endif
461 	};
462 
463 	struct mm_struct *vm_mm;	/* The address space we belong to. */
464 
465 	/*
466 	 * Access permissions of this VMA.
467 	 * See vmf_insert_mixed_prot() for discussion.
468 	 */
469 	pgprot_t vm_page_prot;
470 
471 	/*
472 	 * Flags, see mm.h.
473 	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
474 	 */
475 	union {
476 		const vm_flags_t vm_flags;
477 		vm_flags_t __private __vm_flags;
478 	};
479 
480 #ifdef CONFIG_PER_VMA_LOCK
481 	/*
482 	 * Can only be written (using WRITE_ONCE()) while holding both:
483 	 *  - mmap_lock (in write mode)
484 	 *  - vm_lock->lock (in write mode)
485 	 * Can be read reliably while holding one of:
486 	 *  - mmap_lock (in read or write mode)
487 	 *  - vm_lock->lock (in read or write mode)
488 	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
489 	 * while holding nothing (except RCU to keep the VMA struct allocated).
490 	 *
491 	 * This sequence counter is explicitly allowed to overflow; sequence
492 	 * counter reuse can only lead to occasional unnecessary use of the
493 	 * slowpath.
494 	 */
495 	int vm_lock_seq;
496 	struct vma_lock *vm_lock;
497 
498 	/* Flag to indicate areas detached from the mm->mm_mt tree */
499 	bool detached;
500 #endif
501 
502 	/*
503 	 * For areas with an address space and backing store,
504 	 * linkage into the address_space->i_mmap interval tree.
505 	 *
506 	 * For private anonymous mappings, a pointer to a null terminated string
507 	 * containing the name given to the vma, or NULL if unnamed.
508 	 */
509 
510 	union {
511 		struct {
512 			struct rb_node rb;
513 			unsigned long rb_subtree_last;
514 		} shared;
515 		/*
516 		 * Serialized by mmap_lock. Never use directly because it is
517 		 * valid only when vm_file is NULL. Use anon_vma_name instead.
518 		 */
519 		struct anon_vma_name *anon_name;
520 	};
521 
522 	/*
523 	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
524 	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
525 	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
526 	 * or brk vma (with NULL file) can only be in an anon_vma list.
527 	 */
528 	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
529 					  * page_table_lock */
530 	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */
531 
532 	/* Function pointers to deal with this struct. */
533 	const struct vm_operations_struct *vm_ops;
534 
535 	/* Information about our backing store: */
536 	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
537 					   units */
538 	struct file * vm_file;		/* File we map to (can be NULL). */
539 	void * vm_private_data;		/* was vm_pte (shared mem) */
540 
541 #ifdef CONFIG_SWAP
542 	atomic_long_t swap_readahead_info;
543 #endif
544 #ifndef CONFIG_MMU
545 	struct vm_region *vm_region;	/* NOMMU mapping region */
546 #endif
547 #ifdef CONFIG_NUMA
548 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
549 #endif
550 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
551 
552 	ANDROID_KABI_RESERVE(1);
553 	ANDROID_KABI_RESERVE(2);
554 	ANDROID_KABI_RESERVE(3);
555 	ANDROID_KABI_RESERVE(4);
556 } __randomize_layout;
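
/*
 * Sketch, assuming the helpers named in the vm_flags comment above (they
 * are defined in linux/mm.h, with mmap_assert_write_locked() from
 * linux/mmap_lock.h): vm_flags is const in this struct, so modifications
 * must go through the accessors while mmap_lock is held for writing.
 */
static inline void example_set_dontcopy(struct vm_area_struct *vma)
{
	mmap_assert_write_locked(vma->vm_mm);
	vm_flags_set(vma, VM_DONTCOPY);
}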
557 
558 struct kioctx_table;
559 struct mm_struct {
560 	struct {
561 		struct maple_tree mm_mt;
562 #ifdef CONFIG_MMU
563 		unsigned long (*get_unmapped_area) (struct file *filp,
564 				unsigned long addr, unsigned long len,
565 				unsigned long pgoff, unsigned long flags);
566 #endif
567 		unsigned long mmap_base;	/* base of mmap area */
568 		unsigned long mmap_legacy_base;	/* base of mmap area in bottom-up allocations */
569 #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
570 		/* Base addresses for compatible mmap() */
571 		unsigned long mmap_compat_base;
572 		unsigned long mmap_compat_legacy_base;
573 #endif
574 		unsigned long task_size;	/* size of task vm space */
575 		pgd_t * pgd;
576 
577 #ifdef CONFIG_MEMBARRIER
578 		/**
579 		 * @membarrier_state: Flags controlling membarrier behavior.
580 		 *
581 		 * This field is close to @pgd to hopefully fit in the same
582 		 * cache-line, which needs to be touched by switch_mm().
583 		 */
584 		atomic_t membarrier_state;
585 #endif
586 
587 		/**
588 		 * @mm_users: The number of users including userspace.
589 		 *
590 		 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
591 		 * drops to 0 (i.e. when the task exits and there are no other
592 		 * temporary reference holders), we also release a reference on
593 		 * @mm_count (which may then free the &struct mm_struct if
594 		 * @mm_count also drops to 0).
595 		 */
596 		atomic_t mm_users;
597 
598 		/**
599 		 * @mm_count: The number of references to &struct mm_struct
600 		 * (@mm_users count as 1).
601 		 *
602 		 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
603 		 * &struct mm_struct is freed.
604 		 */
605 		atomic_t mm_count;
606 
607 #ifdef CONFIG_MMU
608 		atomic_long_t pgtables_bytes;	/* PTE page table pages */
609 #endif
610 		int map_count;			/* number of VMAs */
611 
612 		spinlock_t page_table_lock; /* Protects page tables and some
613 					     * counters
614 					     */
615 		/*
616 		 * With some kernel config, the current mmap_lock's offset
617 		 * inside 'mm_struct' is at 0x120, which is very optimal, as
618 		 * its two hot fields 'count' and 'owner' sit in 2 different
619 		 * cachelines,  and when mmap_lock is highly contended, both
620 		 * of the 2 fields will be accessed frequently, current layout
621 		 * will help to reduce cache bouncing.
622 		 *
623 		 * So please be careful with adding new fields before
624 		 * mmap_lock, which can easily push the 2 fields into one
625 		 * cacheline.
626 		 */
627 		struct rw_semaphore mmap_lock;
628 
629 		struct list_head mmlist; /* List of maybe swapped mm's.	These
630 					  * are globally strung together off
631 					  * init_mm.mmlist, and are protected
632 					  * by mmlist_lock
633 					  */
634 #ifdef CONFIG_PER_VMA_LOCK
635 		/*
636 		 * This field has lock-like semantics, meaning it is sometimes
637 		 * accessed with ACQUIRE/RELEASE semantics.
638 		 * Roughly speaking, incrementing the sequence number is
639 		 * equivalent to releasing locks on VMAs; reading the sequence
640 		 * number can be part of taking a read lock on a VMA.
641 		 *
642 		 * Can be modified under write mmap_lock using RELEASE
643 		 * semantics.
644 		 * Can be read with no other protection when holding write
645 		 * mmap_lock.
646 		 * Can be read with ACQUIRE semantics if not holding write
647 		 * mmap_lock.
648 		 */
649 		int mm_lock_seq;
650 #endif
651 
652 
653 		unsigned long hiwater_rss; /* High-watermark of RSS usage */
654 		unsigned long hiwater_vm;  /* High-water virtual memory usage */
655 
656 		unsigned long total_vm;	   /* Total pages mapped */
657 		unsigned long locked_vm;   /* Pages that have PG_mlocked set */
658 		atomic64_t    pinned_vm;   /* Refcount permanently increased */
659 		unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
660 		unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
661 		unsigned long stack_vm;	   /* VM_STACK */
662 		unsigned long def_flags;
663 
664 		/**
665 		 * @write_protect_seq: Locked when any thread is write
666 		 * protecting pages mapped by this mm to enforce a later COW,
667 		 * for instance during page table copying for fork().
668 		 */
669 		seqcount_t write_protect_seq;
670 
671 		spinlock_t arg_lock; /* protect the below fields */
672 
673 		unsigned long start_code, end_code, start_data, end_data;
674 		unsigned long start_brk, brk, start_stack;
675 		unsigned long arg_start, arg_end, env_start, env_end;
676 
677 		unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
678 
679 		/*
680 		 * Special counters, in some configurations protected by the
681 		 * page_table_lock, in other configurations by being atomic.
682 		 */
683 		struct mm_rss_stat rss_stat;
684 
685 		struct linux_binfmt *binfmt;
686 
687 		/* Architecture-specific MM context */
688 		mm_context_t context;
689 
690 		unsigned long flags; /* Must use atomic bitops to access */
691 
692 #ifdef CONFIG_AIO
693 		spinlock_t			ioctx_lock;
694 		struct kioctx_table __rcu	*ioctx_table;
695 #endif
696 #ifdef CONFIG_MEMCG
697 		/*
698 		 * "owner" points to a task that is regarded as the canonical
699 		 * user/owner of this mm. All of the following must be true in
700 		 * order for it to be changed:
701 		 *
702 		 * current == mm->owner
703 		 * current->mm != mm
704 		 * new_owner->mm == mm
705 		 * new_owner->alloc_lock is held
706 		 */
707 		struct task_struct __rcu *owner;
708 #endif
709 		struct user_namespace *user_ns;
710 
711 		/* store ref to file /proc/<pid>/exe symlink points to */
712 		struct file __rcu *exe_file;
713 #ifdef CONFIG_MMU_NOTIFIER
714 		struct mmu_notifier_subscriptions *notifier_subscriptions;
715 #endif
716 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
717 		pgtable_t pmd_huge_pte; /* protected by page_table_lock */
718 #endif
719 #ifdef CONFIG_NUMA_BALANCING
720 		/*
721 		 * numa_next_scan is the next time that PTEs will be remapped
722 		 * PROT_NONE to trigger NUMA hinting faults; such faults gather
723 		 * statistics and migrate pages to new nodes if necessary.
724 		 */
725 		unsigned long numa_next_scan;
726 
727 		/* Restart point for scanning and remapping PTEs. */
728 		unsigned long numa_scan_offset;
729 
730 		/* numa_scan_seq prevents two threads remapping PTEs. */
731 		int numa_scan_seq;
732 #endif
733 		/*
734 		 * An operation with batched TLB flushing is going on. Anything
735 		 * that can move process memory needs to flush the TLB when
736 		 * moving a PROT_NONE mapped page.
737 		 */
738 		atomic_t tlb_flush_pending;
739 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
740 		/* See flush_tlb_batched_pending() */
741 		atomic_t tlb_flush_batched;
742 #endif
743 		struct uprobes_state uprobes_state;
744 #ifdef CONFIG_PREEMPT_RT
745 		struct rcu_head delayed_drop;
746 #endif
747 #ifdef CONFIG_HUGETLB_PAGE
748 		atomic_long_t hugetlb_usage;
749 #endif
750 		struct work_struct async_put_work;
751 
752 #ifdef CONFIG_IOMMU_SVA
753 		u32 pasid;
754 #endif
755 #ifdef CONFIG_KSM
756 		/*
757 		 * Represent how many pages of this process are involved in KSM
758 		 * merging.
759 		 */
760 		unsigned long ksm_merging_pages;
761 		/*
762 		 * Represent how many pages are checked for ksm merging
763 		 * including merged and not merged.
764 		 */
765 		unsigned long ksm_rmap_items;
766 #endif
767 #ifdef CONFIG_LRU_GEN
768 		struct {
769 			/* this mm_struct is on lru_gen_mm_list */
770 			struct list_head list;
771 			/*
772 			 * Set when switching to this mm_struct, as a hint of
773 			 * whether it has been used since the last time per-node
774 			 * page table walkers cleared the corresponding bits.
775 			 */
776 			unsigned long bitmap;
777 #ifdef CONFIG_MEMCG
778 			/* points to the memcg of "owner" above */
779 			struct mem_cgroup *memcg;
780 #endif
781 		} lru_gen;
782 #endif /* CONFIG_LRU_GEN */
783 
784 		ANDROID_KABI_RESERVE(1);
785 	} __randomize_layout;
786 
787 	/*
788 	 * The mm_cpumask needs to be at the end of mm_struct, because it
789 	 * is dynamically sized based on nr_cpu_ids.
790 	 */
791 	unsigned long cpu_bitmap[];
792 };
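
/*
 * Sketch of the two-level reference scheme documented at @mm_users and
 * @mm_count above; mmget_not_zero() and mmput() are declared in
 * linux/sched/mm.h, not in this header.
 */
static inline int example_with_mm(struct mm_struct *mm)
{
	if (!mmget_not_zero(mm))	/* address space already torn down */
		return -ESRCH;
	/* ... safely walk or modify the address space here ... */
	mmput(mm);			/* may release the memory map */
	return 0;
}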
793 
794 #define MM_MT_FLAGS	(MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | \
795 			 MT_FLAGS_USE_RCU)
796 extern struct mm_struct init_mm;
797 
798 /* Pointer magic because the dynamic array size confuses some compilers. */
799 static inline void mm_init_cpumask(struct mm_struct *mm)
800 {
801 	unsigned long cpu_bitmap = (unsigned long)mm;
802 
803 	cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
804 	cpumask_clear((struct cpumask *)cpu_bitmap);
805 }
806 
807 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
808 static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
809 {
810 	return (struct cpumask *)&mm->cpu_bitmap;
811 }
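
/*
 * Usage sketch: architecture code typically tests or sets bits in this
 * mask at context-switch and TLB-shootdown time (cpumask_test_cpu() comes
 * from linux/cpumask.h, already included above).
 */
static inline bool example_mm_seen_on_cpu(struct mm_struct *mm, int cpu)
{
	return cpumask_test_cpu(cpu, mm_cpumask(mm));
}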
812 
813 #ifdef CONFIG_LRU_GEN
814 
815 struct lru_gen_mm_list {
816 	/* mm_struct list for page table walkers */
817 	struct list_head fifo;
818 	/* protects the list above */
819 	spinlock_t lock;
820 };
821 
822 void lru_gen_add_mm(struct mm_struct *mm);
823 void lru_gen_del_mm(struct mm_struct *mm);
824 #ifdef CONFIG_MEMCG
825 void lru_gen_migrate_mm(struct mm_struct *mm);
826 #endif
827 
828 static inline void lru_gen_init_mm(struct mm_struct *mm)
829 {
830 	INIT_LIST_HEAD(&mm->lru_gen.list);
831 	mm->lru_gen.bitmap = 0;
832 #ifdef CONFIG_MEMCG
833 	mm->lru_gen.memcg = NULL;
834 #endif
835 }
836 
837 static inline void lru_gen_use_mm(struct mm_struct *mm)
838 {
839 	/*
840 	 * When the bitmap is set, page reclaim knows this mm_struct has been
841 	 * used since the last time it cleared the bitmap. So it might be worth
842 	 * walking the page tables of this mm_struct to clear the accessed bit.
843 	 */
844 	WRITE_ONCE(mm->lru_gen.bitmap, -1);
845 }
846 
847 #else /* !CONFIG_LRU_GEN */
848 
849 static inline void lru_gen_add_mm(struct mm_struct *mm)
850 {
851 }
852 
853 static inline void lru_gen_del_mm(struct mm_struct *mm)
854 {
855 }
856 
857 #ifdef CONFIG_MEMCG
858 static inline void lru_gen_migrate_mm(struct mm_struct *mm)
859 {
860 }
861 #endif
862 
863 static inline void lru_gen_init_mm(struct mm_struct *mm)
864 {
865 }
866 
867 static inline void lru_gen_use_mm(struct mm_struct *mm)
868 {
869 }
870 
871 #endif /* CONFIG_LRU_GEN */
872 
873 struct vma_iterator {
874 	struct ma_state mas;
875 };
876 
877 #define VMA_ITERATOR(name, __mm, __addr)				\
878 	struct vma_iterator name = {					\
879 		.mas = {						\
880 			.tree = &(__mm)->mm_mt,				\
881 			.index = __addr,				\
882 			.node = MAS_START,				\
883 		},							\
884 	}
885 
886 static inline void vma_iter_init(struct vma_iterator *vmi,
887 		struct mm_struct *mm, unsigned long addr)
888 {
889 	vmi->mas.tree = &mm->mm_mt;
890 	vmi->mas.index = addr;
891 	vmi->mas.node = MAS_START;
892 }
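
/*
 * Usage sketch: walking every VMA in an mm with the iterator above.
 * for_each_vma() and the mmap_lock helpers live in linux/mm.h and
 * linux/mmap_lock.h; the caller must hold mmap_lock at least for reading.
 */
static inline unsigned long example_count_vmas(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	unsigned long nr = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma)
		nr++;
	mmap_read_unlock(mm);
	return nr;
}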
893 
894 struct mmu_gather;
895 extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
896 extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
897 extern void tlb_finish_mmu(struct mmu_gather *tlb);
898 
899 struct vm_fault;
900 
901 /**
902  * typedef vm_fault_t - Return type for page fault handlers.
903  *
904  * Page fault handlers return a bitmask of %VM_FAULT values.
905  */
906 typedef __bitwise unsigned int vm_fault_t;
907 
908 /**
909  * enum vm_fault_reason - Page fault handlers return a bitmask of
910  * these values to tell the core VM what happened when handling the
911  * fault. Used to decide whether a process gets delivered SIGBUS or
912  * just gets major/minor fault counters bumped up.
913  *
914  * @VM_FAULT_OOM:		Out Of Memory
915  * @VM_FAULT_SIGBUS:		Bad access
916  * @VM_FAULT_MAJOR:		Page read from storage
917  * @VM_FAULT_WRITE:		Special case for get_user_pages
918  * @VM_FAULT_HWPOISON:		Hit poisoned small page
919  * @VM_FAULT_HWPOISON_LARGE:	Hit poisoned large page. Index encoded
920  *				in upper bits
921  * @VM_FAULT_SIGSEGV:		segmentation fault
922  * @VM_FAULT_NOPAGE:		->fault installed the pte, not return page
923  * @VM_FAULT_LOCKED:		->fault locked the returned page
924  * @VM_FAULT_RETRY:		->fault blocked, must retry
925  * @VM_FAULT_FALLBACK:		huge page fault failed, fall back to small
926  * @VM_FAULT_DONE_COW:		->fault has fully handled COW
927  * @VM_FAULT_NEEDDSYNC:		->fault did not modify page tables and needs
928  *				fsync() to complete (for synchronous page faults
929  *				in DAX)
930  * @VM_FAULT_COMPLETED:		->fault completed, meanwhile mmap lock released
931  * @VM_FAULT_HINDEX_MASK:	mask HINDEX value
932  *
933  */
934 enum vm_fault_reason {
935 	VM_FAULT_OOM            = (__force vm_fault_t)0x000001,
936 	VM_FAULT_SIGBUS         = (__force vm_fault_t)0x000002,
937 	VM_FAULT_MAJOR          = (__force vm_fault_t)0x000004,
938 	VM_FAULT_WRITE          = (__force vm_fault_t)0x000008,
939 	VM_FAULT_HWPOISON       = (__force vm_fault_t)0x000010,
940 	VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
941 	VM_FAULT_SIGSEGV        = (__force vm_fault_t)0x000040,
942 	VM_FAULT_NOPAGE         = (__force vm_fault_t)0x000100,
943 	VM_FAULT_LOCKED         = (__force vm_fault_t)0x000200,
944 	VM_FAULT_RETRY          = (__force vm_fault_t)0x000400,
945 	VM_FAULT_FALLBACK       = (__force vm_fault_t)0x000800,
946 	VM_FAULT_DONE_COW       = (__force vm_fault_t)0x001000,
947 	VM_FAULT_NEEDDSYNC      = (__force vm_fault_t)0x002000,
948 	VM_FAULT_COMPLETED      = (__force vm_fault_t)0x004000,
949 	VM_FAULT_HINDEX_MASK    = (__force vm_fault_t)0x0f0000,
950 };
951 
952 /* Encode hstate index for a hwpoisoned large page */
953 #define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
954 #define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
955 
956 #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS |	\
957 			VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON |	\
958 			VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)
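
/*
 * Sketch: callers treat a vm_fault_t as a bitmask, so failure is tested
 * against VM_FAULT_ERROR rather than compared for equality.
 */
static inline bool example_fault_failed(vm_fault_t ret)
{
	return !!(ret & VM_FAULT_ERROR);
}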
959 
960 #define VM_FAULT_RESULT_TRACE \
961 	{ VM_FAULT_OOM,                 "OOM" },	\
962 	{ VM_FAULT_SIGBUS,              "SIGBUS" },	\
963 	{ VM_FAULT_MAJOR,               "MAJOR" },	\
964 	{ VM_FAULT_WRITE,               "WRITE" },	\
965 	{ VM_FAULT_HWPOISON,            "HWPOISON" },	\
966 	{ VM_FAULT_HWPOISON_LARGE,      "HWPOISON_LARGE" },	\
967 	{ VM_FAULT_SIGSEGV,             "SIGSEGV" },	\
968 	{ VM_FAULT_NOPAGE,              "NOPAGE" },	\
969 	{ VM_FAULT_LOCKED,              "LOCKED" },	\
970 	{ VM_FAULT_RETRY,               "RETRY" },	\
971 	{ VM_FAULT_FALLBACK,            "FALLBACK" },	\
972 	{ VM_FAULT_DONE_COW,            "DONE_COW" },	\
973 	{ VM_FAULT_NEEDDSYNC,           "NEEDDSYNC" },	\
974 	{ VM_FAULT_COMPLETED,           "COMPLETED" }
975 
976 struct vm_special_mapping {
977 	const char *name;	/* The name, e.g. "[vdso]". */
978 
979 	/*
980 	 * If .fault is not provided, this points to a
981 	 * NULL-terminated array of pages that back the special mapping.
982 	 *
983 	 * This must not be NULL unless .fault is provided.
984 	 */
985 	struct page **pages;
986 
987 	/*
988 	 * If non-NULL, then this is called to resolve page faults
989 	 * on the special mapping.  If used, .pages is not checked.
990 	 */
991 	vm_fault_t (*fault)(const struct vm_special_mapping *sm,
992 				struct vm_area_struct *vma,
993 				struct vm_fault *vmf);
994 
995 	int (*mremap)(const struct vm_special_mapping *sm,
996 		     struct vm_area_struct *new_vma);
997 };
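
/*
 * Sketch of a vdso-style descriptor using this struct; the page array
 * would be filled at init time and the mapping installed with
 * _install_special_mapping() from linux/mm.h.  All names here are
 * illustrative, not taken from any architecture.
 */
static struct page *example_vdso_pages[2];	/* NULL-terminated */

static const struct vm_special_mapping example_vdso_mapping = {
	.name	= "[vdso]",
	.pages	= example_vdso_pages,
};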
998 
999 enum tlb_flush_reason {
1000 	TLB_FLUSH_ON_TASK_SWITCH,
1001 	TLB_REMOTE_SHOOTDOWN,
1002 	TLB_LOCAL_SHOOTDOWN,
1003 	TLB_LOCAL_MM_SHOOTDOWN,
1004 	TLB_REMOTE_SEND_IPI,
1005 	NR_TLB_FLUSH_REASONS,
1006 };
1007 
1008  /*
1009   * A swap entry has to fit into a "unsigned long", as the entry is hidden
1010   * in the "index" field of the swapper address space.
1011   */
1012 typedef struct {
1013 	unsigned long val;
1014 } swp_entry_t;
1015 
1016 /**
1017  * enum fault_flag - Fault flag definitions.
1018  * @FAULT_FLAG_WRITE: Fault was a write fault.
1019  * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
1020  * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
1021  * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
1022  * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
1023  * @FAULT_FLAG_TRIED: The fault has been tried once.
1024  * @FAULT_FLAG_USER: The fault originated in userspace.
1025  * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
1026  * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
1027  * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
1028  * @FAULT_FLAG_UNSHARE: The fault is an unsharing request to unshare (and mark
1029  *                      exclusive) a possibly shared anonymous page that is
1030  *                      mapped R/O.
1031  * @FAULT_FLAG_ORIG_PTE_VALID: whether the fault has vmf->orig_pte cached.
1032  *                        We should only access orig_pte if this flag set.
1033  * @FAULT_FLAG_VMA_LOCK: The fault is handled under VMA lock.
1034  *
1035  * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
1036  * whether we would allow page faults to retry by specifying these two
1037  * fault flags correctly.  Currently there can be three legal combinations:
1038  *
1039  * (a) ALLOW_RETRY and !TRIED:  this means the page fault allows retry, and
1040  *                              this is the first try
1041  *
1042  * (b) ALLOW_RETRY and TRIED:   this means the page fault allows retry, and
1043  *                              we've already tried at least once
1044  *
1045  * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
1046  *
1047  * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
1048  * be used.  Note that page faults can be allowed to retry for multiple times,
1049  * in which case we'll have an initial fault with flags (a) then later on
1050  * continuous faults with flags (b).  We should always try to detect pending
1051  * signals before a retry to make sure the continuous page faults can still be
1052  * interrupted if necessary.
1053  *
1054  * The combination FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE is illegal.
1055  * FAULT_FLAG_UNSHARE is ignored and treated like an ordinary read fault when
1056  * no existing R/O-mapped anonymous page is encountered.
1057  */
1058 enum fault_flag {
1059 	FAULT_FLAG_WRITE =		1 << 0,
1060 	FAULT_FLAG_MKWRITE =		1 << 1,
1061 	FAULT_FLAG_ALLOW_RETRY =	1 << 2,
1062 	FAULT_FLAG_RETRY_NOWAIT = 	1 << 3,
1063 	FAULT_FLAG_KILLABLE =		1 << 4,
1064 	FAULT_FLAG_TRIED = 		1 << 5,
1065 	FAULT_FLAG_USER =		1 << 6,
1066 	FAULT_FLAG_REMOTE =		1 << 7,
1067 	FAULT_FLAG_INSTRUCTION =	1 << 8,
1068 	FAULT_FLAG_INTERRUPTIBLE =	1 << 9,
1069 	FAULT_FLAG_UNSHARE =		1 << 10,
1070 	FAULT_FLAG_ORIG_PTE_VALID =	1 << 11,
1071 	FAULT_FLAG_VMA_LOCK =		1 << 12,
1072 };
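
/*
 * Sketch, not part of this header: the ALLOW_RETRY/TRIED rules documented
 * above, expressed as a check a fault path might perform.
 */
static inline bool example_fault_may_retry(enum fault_flag flags)
{
	/* (!ALLOW_RETRY && TRIED) is the one illegal combination */
	if (WARN_ON_ONCE(!(flags & FAULT_FLAG_ALLOW_RETRY) &&
			 (flags & FAULT_FLAG_TRIED)))
		return false;
	return flags & FAULT_FLAG_ALLOW_RETRY;
}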
1073 
1074 typedef unsigned int __bitwise zap_flags_t;
1075 
1076 #endif /* _LINUX_MM_TYPES_H */
1077