/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
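
/*
 * Worked example (editorial note, not in the original header): each auxv
 * entry is an (id, value) pair, hence the factor of two, and the +1 leaves
 * room for the AT_NULL terminator.  Assuming AT_VECTOR_SIZE_BASE is 20, as
 * defined in <linux/auxvec.h>, and no arch-specific entries:
 *
 *	AT_VECTOR_SIZE = 2 * (0 + 20 + 1) = 42 unsigned longs
 */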

struct address_space;
struct mem_cgroup;
struct hmm;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * The objects in struct page are organized in double word blocks in
 * order to allow us to use atomic double word operations on portions
 * of struct page. That is currently only used by SLUB, but the
 * arrangement also allows atomic double word operations on the
 * flags/mapping and lru list pointers.
 */
struct page {
	/* First double word block */
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	union {
		struct address_space *mapping;	/* If low bit clear, points to
						 * inode address_space, or NULL.
						 * If page mapped as anonymous
						 * memory, low bit is set, and
						 * it points to anon_vma object:
						 * see PAGE_MAPPING_ANON below.
						 */
		void *s_mem;			/* slab first object */
		atomic_t compound_mapcount;	/* first tail page */
		/* page_deferred_list().next	 -- second tail page */
	};

	/* Second double word */
	union {
		pgoff_t index;		/* Our offset within mapping. */
		void *freelist;		/* sl[aou]b first free object */
		/* page_deferred_list().prev	-- second tail page */
	};

	union {
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
	defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
		/* Used for cmpxchg_double in slub */
		unsigned long counters;
#else
		/*
		 * Keep _refcount separate from slub cmpxchg_double data,
		 * as the rest of the double word is protected by slab_lock
		 * but _refcount is not.
		 */
		unsigned counters;
#endif
		struct {

			union {
				/*
				 * Count of ptes mapped in mms, to show when
				 * page is mapped & limit reverse map searches.
				 *
				 * Extra information about page type may be
				 * stored here for pages that are never mapped,
				 * in which case the value MUST BE <= -2.
				 * See page-flags.h for more details.
				 */
				atomic_t _mapcount;

				unsigned int active;		/* SLAB */
				struct {			/* SLUB */
					unsigned inuse:16;
					unsigned objects:15;
					unsigned frozen:1;
				};
				int units;			/* SLOB */
			};
			/*
			 * Usage count. *USE WRAPPER FUNCTIONS* for manual
			 * accounting; see page_ref.h.
			 */
			atomic_t _refcount;
		};
	};

	/*
	 * Third double word block
	 *
	 * WARNING: bit 0 of the first word encodes PageTail(). That means
	 * the other users of this storage space MUST NOT use that bit, to
	 * avoid collisions and false-positive PageTail() results.
	 */
	union {
		struct list_head lru;	/* Pageout list, e.g. active_list,
					 * protected by zone_lru_lock !
					 * Can be used as a generic list
					 * by the page owner.
					 */
		struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an
					    * lru or handled by a slab
					    * allocator, this points to the
					    * hosting device page map.
					    */
		struct {		/* slub per cpu partial pages */
			struct page *next;	/* Next partial slab */
#ifdef CONFIG_64BIT
			int pages;	/* Nr of partial slabs left */
			int pobjects;	/* Approximate # of objects */
#else
			short int pages;
			short int pobjects;
#endif
		};

		struct rcu_head rcu_head;	/* Used by SLAB
						 * when destroying via RCU
						 */
		/* Tail pages of compound page */
		struct {
			unsigned long compound_head; /* If bit zero is set */

			/* First tail page only */
#ifdef CONFIG_64BIT
			/*
			 * On 64 bit systems we have enough space in struct
			 * page to encode compound_dtor and compound_order as
			 * unsigned int, which can help the compiler generate
			 * better or smaller code on some architectures.
			 */
			unsigned int compound_dtor;
			unsigned int compound_order;
#else
			unsigned short int compound_dtor;
			unsigned short int compound_order;
#endif
		};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
		struct {
			unsigned long __pad;	/* do not overlay pmd_huge_pte
						 * with compound_head to avoid
						 * possible bit 0 collision.
						 */
			pgtable_t pmd_huge_pte; /* protected by page->ptl */
		};
#endif
	};

	/* Remainder is not double word aligned */
	union {
		unsigned long private;		/* Mapping-private opaque data:
						 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
		spinlock_t *ptl;
#else
		spinlock_t ptl;
#endif
#endif
		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
	};

#ifdef CONFIG_MEMCG
	struct mem_cgroup *mem_cgroup;
#endif

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
}
/*
 * The struct page can be forced to be double word aligned so that atomic ops
 * on double words work. The SLUB allocator can make use of such a feature.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
	__aligned(2 * sizeof(unsigned long))
#endif
;
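
/*
 * Illustrative sketch (editorial addition, not in the original header):
 * _refcount must only be touched through the page_ref.h / mm.h wrappers,
 * which keep the reference-count tracepoints accurate.  A typical
 * get/inspect/put sequence looks like:
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *
 *	get_page(page);				// _refcount: 1 -> 2
 *	pr_debug("refs=%d\n", page_ref_count(page));
 *	put_page(page);				// _refcount: 2 -> 1
 *	__free_page(page);
 */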

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

struct page_frag_cache {
	void * va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;
	__u16 size;
#else
	__u32 offset;
#endif
	/* we maintain a pagecount bias, so that we don't dirty the cache
	 * line containing page->_refcount every time we allocate a fragment.
	 */
	unsigned int		pagecnt_bias;
	bool pfmemalloc;
};
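
/*
 * Illustrative sketch (editorial addition): with 4 KiB pages,
 * PAGE_FRAG_CACHE_MAX_SIZE aligns 32768 up to the page size (still 32768)
 * and PAGE_FRAG_CACHE_MAX_ORDER is get_order(32768) = 3.  Networking code
 * typically carves fragments out of such a cache via the page_frag
 * allocator (the cache variable here is hypothetical):
 *
 *	static struct page_frag_cache frag_cache;
 *
 *	void *buf = page_frag_alloc(&frag_cache, 2048, GFP_ATOMIC);
 *	if (buf) {
 *		// ... fill the 2048-byte fragment ...
 *		page_frag_free(buf);
 *	}
 */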

typedef unsigned long vm_flags_t;

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}
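
/*
 * Illustrative sketch (editorial addition): compound_mapcount lives in the
 * first tail page, hence the page[1] addressing above.  Like _mapcount, it
 * is stored biased by -1, so readers such as compound_mapcount() in
 * <linux/mm.h> do roughly:
 *
 *	struct page *head = compound_head(page);
 *
 *	return atomic_read(compound_mapcount_ptr(head)) + 1;
 */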

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

/*
 * This struct defines a virtual memory area. There is one of these
 * per VM area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area, etc).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 *
	 * For private anonymous mappings, a pointer to a NULL-terminated
	 * string in the user process containing the name given to the vma,
	 * or NULL if unnamed.
	 */
	union {
		struct {
			struct rb_node rb;
			unsigned long rb_subtree_last;
		} shared;
		const char __user *anon_name;
	};

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

	atomic_long_t swap_readahead_info;
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
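
/*
 * Illustrative sketch (editorial addition): VMAs hang off mm->mmap as an
 * address-ordered doubly linked list, so a walk over a task's mappings
 * (with mmap_sem held for read) looks like:
 *
 *	struct vm_area_struct *vma;
 *
 *	down_read(&mm->mmap_sem);
 *	for (vma = mm->mmap; vma; vma = vma->vm_next)
 *		pr_debug("%lx-%lx\n", vma->vm_start, vma->vm_end);
 *	up_read(&mm->mmap_sem);
 */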

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

struct kioctx_table;
struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	u64 vmacache_seqnum;                   /* per-thread vmacache */
#ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
	/* Base addresses for compatible mmap() */
	unsigned long mmap_compat_base;
	unsigned long mmap_compat_legacy_base;
#endif
	unsigned long task_size;		/* size of task vm space */
	unsigned long highest_vm_end;		/* highest vma end address */
	pgd_t * pgd;

	/**
	 * @mm_users: The number of users including userspace.
	 *
	 * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
	 * to 0 (i.e. when the task exits and there are no other temporary
	 * reference holders), we also release a reference on @mm_count
	 * (which may then free the &struct mm_struct if @mm_count also
	 * drops to 0).
	 */
	atomic_t mm_users;

	/**
	 * @mm_count: The number of references to &struct mm_struct
	 * (@mm_users counts as 1).
	 *
	 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
	 * &struct mm_struct is freed.
	 */
	atomic_t mm_count;

	atomic_long_t nr_ptes;			/* PTE page table pages */
#if CONFIG_PGTABLE_LEVELS > 2
	atomic_long_t nr_pmds;			/* PMD page table pages */
#endif
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe swapped mm's.	These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */


	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */
	unsigned long def_flags;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	unsigned long flags; /* Must use atomic bitops to access the bits */

	struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_MEMBARRIER
	atomic_t membarrier_state;
#endif
#ifdef CONFIG_AIO
	spinlock_t			ioctx_lock;
	struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MEMCG
	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm. All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */
	struct task_struct __rcu *owner;
#endif
	struct user_namespace *user_ns;

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/*
	 * numa_next_scan is the next time that the PTEs will be marked
	 * pte_numa. NUMA hinting faults will gather statistics and migrate
	 * pages to new nodes if necessary.
	 */
	unsigned long numa_next_scan;

	/* Restart point for scanning and setting pte_numa */
	unsigned long numa_scan_offset;

	/* numa_scan_seq prevents two threads setting pte_numa */
	int numa_scan_seq;
#endif
	/*
	 * An operation with batched TLB flushing is going on. Anything that
	 * can move process memory needs to flush the TLB when moving a
	 * PROT_NONE or PROT_NUMA mapped page.
	 */
	atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/* See flush_tlb_batched_pending() */
	bool tlb_flush_batched;
#endif
	struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
	atomic_long_t hugetlb_usage;
#endif
	struct work_struct async_put_work;

#if IS_ENABLED(CONFIG_HMM)
	/* HMM needs to track a few things per mm */
	struct hmm *hmm;
#endif
} __randomize_layout;
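
/*
 * Illustrative sketch (editorial addition): the two counters give two
 * lifetimes.  mm_users keeps the address space (VMAs, page tables) alive;
 * mm_count keeps only the struct mm_struct itself alive, e.g. for lazy-TLB
 * kernel threads.  A typical pattern for code that must access another
 * task's memory:
 *
 *	struct mm_struct *mm = get_task_mm(task);	// takes an mm_users ref
 *
 *	if (mm) {
 *		down_read(&mm->mmap_sem);
 *		// ... walk mm->mmap, inspect mappings ...
 *		up_read(&mm->mmap_sem);
 *		mmput(mm);				// drops mm_users
 *	}
 *
 * whereas code that only needs the struct to stay allocated would pair
 * mmgrab(mm) with mmdrop(mm).
 */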

extern struct mm_struct init_mm;

static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
	cpumask_clear(mm->cpu_vm_mask_var);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return mm->cpu_vm_mask_var;
}
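
/*
 * Illustrative sketch (editorial addition): architectures use this mask to
 * track which CPUs may hold TLB entries for the mm.  Context-switch code
 * typically does something like:
 *
 *	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next_mm));
 *
 * and remote-shootdown code sends flush IPIs only to the CPUs set in
 * mm_cpumask(mm).
 */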

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
				unsigned long start, unsigned long end);
extern void tlb_finish_mmu(struct mmu_gather *tlb,
				unsigned long start, unsigned long end);
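
/*
 * Illustrative sketch (editorial addition): callers bracket a batch of
 * unmap work with these two, letting the mmu_gather batch page freeing
 * and TLB invalidation.  Roughly:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	unmap_vmas(&tlb, vma, start, end);	// accumulate pages + ranges
 *	tlb_finish_mmu(&tlb, start, end);	// flush TLBs, free pages
 */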

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Because the increment is constrained by the PTL unlock, it
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
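
/*
 * Illustrative sketch (editorial addition): a typical consumer holds the
 * PTL for the PTE it just changed and, if a batched flush is still in
 * flight, flushes eagerly rather than relying on the batch.  The range
 * below is hypothetical:
 *
 *	spin_lock(ptl);
 *	// ... modify the PTE ...
 *	if (mm_tlb_flush_pending(mm))
 *		flush_tlb_range(vma, addr, addr + PAGE_SIZE);
 *	spin_unlock(ptl);
 */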

struct vm_fault;

struct vm_special_mapping {
	const char *name;	/* The name, e.g. "[vdso]". */

	/*
	 * If .fault is not provided, this points to a
	 * NULL-terminated array of pages that back the special mapping.
	 *
	 * This must not be NULL unless .fault is provided.
	 */
	struct page **pages;

	/*
	 * If non-NULL, then this is called to resolve page faults
	 * on the special mapping.  If used, .pages is not checked.
	 */
	int (*fault)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *vma,
		     struct vm_fault *vmf);

	int (*mremap)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *new_vma);
};
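
/*
 * Illustrative sketch (editorial addition): arch code backs regions such
 * as the vDSO with a static descriptor and installs it with
 * _install_special_mapping() (declared in <linux/mm.h>).  The names below
 * are hypothetical:
 *
 *	static struct page *vdso_pages[2];	// one page + NULL terminator
 *	static const struct vm_special_mapping vdso_mapping = {
 *		.name	= "[vdso]",
 *		.pages	= vdso_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_EXEC |
 *				       VM_MAYREAD | VM_MAYEXEC,
 *				       &vdso_mapping);
 */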

enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};

/*
 * A swap entry has to fit into an "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;
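
/*
 * Illustrative sketch (editorial addition): <linux/swapops.h> provides the
 * encode/decode helpers, so a (type, offset) pair round-trips as:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *
 *	BUG_ON(swp_type(entry) != type);
 *	BUG_ON(swp_offset(entry) != offset);
 */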

/* Return the name for an anonymous mapping or NULL for a file-backed mapping */
static inline const char __user *vma_get_anon_name(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		return NULL;

	return vma->anon_name;
}

#endif /* _LINUX_MM_TYPES_H */