/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;     /* The first vma to munmap */
	struct vm_area_struct *prev;    /* vma before the munmap area */
	struct vm_area_struct *next;    /* vma after the munmap area */
	struct list_head *uf;           /* Userfaultfd list_head */
	unsigned long start;            /* Aligned start addr (inclusive) */
	unsigned long end;              /* Aligned end addr (exclusive) */
	unsigned long unmap_start;      /* Unmap PTE start */
	unsigned long unmap_end;        /* Unmap PTE end */
	int vma_count;                  /* Number of vmas that will be removed */
	bool unlock;                    /* Unlock after the munmap */
	bool clear_ptes;                /* If there are outstanding PTEs to be cleared */
	/* 2 byte hole */
	unsigned long nr_pages;         /* Number of pages being removed */
	unsigned long locked_vm;        /* Number of locked pages */
	unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

enum vma_merge_state {
	VMA_MERGE_START,
	VMA_MERGE_ERROR_NOMEM,
	VMA_MERGE_NOMERGE,
	VMA_MERGE_SUCCESS,
};

enum vma_merge_flags {
	VMG_FLAG_DEFAULT = 0,
	/*
	 * If we can expand, simply do so. We know there is nothing to merge to
	 * the right. Does not reset state upon failure to merge. The VMA
	 * iterator is assumed to be positioned at the previous VMA, rather than
	 * at the gap.
	 */
	VMG_FLAG_JUST_EXPAND = 1 << 0,
};

/* Represents a VMA merge operation. */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	pgoff_t pgoff;
	struct vm_area_struct *prev;
	struct vm_area_struct *next; /* Modified by vma_merge(). */
	struct vm_area_struct *vma; /* Either a new VMA or the one being modified. */
	unsigned long start;
	unsigned long end;
	unsigned long flags;
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_flags merge_flags;
	enum vma_merge_state state;

	/*
	 * If a merge is possible, but an OOM error occurs, give up and don't
	 * execute the merge, returning NULL.
	 */
	bool give_up_on_oom :1;
};
static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
	return vmg->state == VMA_MERGE_ERROR_NOMEM;
}

/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}
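
/*
 * Worked example (illustrative values only, not taken from any caller): a VMA
 * with vm_start == 0x7f0000000000 and vm_pgoff == 0x10, queried at
 * addr == vm_start + 2 * PAGE_SIZE, yields 0x10 + PHYS_PFN(2 * PAGE_SIZE)
 * == 0x12, i.e. the page offset within the backing object that maps @addr.
 */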

#define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.flags = flags_,					\
		.pgoff = pgoff_,					\
		.state = VMA_MERGE_START,				\
		.merge_flags = VMG_FLAG_DEFAULT,			\
	}

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.next = NULL,					\
		.vma = vma_,					\
		.start = start_,				\
		.end = end_,					\
		.flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
		.state = VMA_MERGE_START,			\
		.merge_flags = VMG_FLAG_DEFAULT,		\
	}
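
/*
 * Minimal usage sketch (illustrative only; addr, len and vm_flags are
 * hypothetical locals, and the real callers in vma.c/mmap.c additionally
 * position the iterator, set vmg.next where required and handle the
 * fall-back path that allocates a fresh VMA):
 *
 *	VMA_ITERATOR(vmi, mm, addr);
 *	VMG_STATE(vmg, mm, &vmi, addr, addr + len, vm_flags,
 *		  addr >> PAGE_SHIFT);
 *	struct vm_area_struct *vma;
 *
 *	vmg.prev = vma_prev(&vmi);
 *	vma = vma_merge_new_range(&vmg);
 *	if (!vma && vmg_nomem(&vmg))
 *		return -ENOMEM;
 *	// !vma without ENOMEM means no merge was possible: allocate a new VMA.
 */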

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

/* Required for expand_downwards(). */
void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma);

/* Required for expand_downwards(). */
void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma);

int vma_expand(struct vma_merge_struct *vmg);
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	vma_mark_attached(vma);
	return 0;
}

#ifdef CONFIG_MMU
/*
 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
 * @vms: The vma munmap struct
 * @vmi: The vma iterator
 * @vma: The first vm_area_struct to munmap
 * @start: The aligned start address to munmap
 * @end: The aligned end address to munmap
 * @uf: The userfaultfd list_head
 * @unlock: Unlock after the operation.  Only unlocked on success
 */
static inline void init_vma_munmap(struct vma_munmap_struct *vms,
		struct vma_iterator *vmi, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf,
		bool unlock)
{
	vms->vmi = vmi;
	vms->vma = vma;
	if (vma) {
		vms->start = start;
		vms->end = end;
	} else {
		vms->start = vms->end = 0;
	}
	vms->unlock = unlock;
	vms->uf = uf;
	vms->vma_count = 0;
	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
	vms->unmap_start = FIRST_USER_ADDRESS;
	vms->unmap_end = USER_PGTABLES_CEILING;
	vms->clear_ptes = false;
}
#endif

int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_clean_up_area(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

/*
 * reattach_vmas() - Undo any munmap work and free resources
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the vmas.
 */
static inline void reattach_vmas(struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_mark_attached(vma);

	__mt_destroy(mas_detach->tree);
}

/*
 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
 * operation.
 * @vms: The vma unmap structure
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas, free up the maple tree used to track the vmas.
 * If that is not possible because the ptes have already been cleared (and
 * vm_ops->close() may have been called), then NULL is written over the vmas
 * and the vmas are removed (the munmap() is completed).
 */
static inline void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct ma_state *mas = &vms->vmi->mas;
	if (!vms->nr_pages)
		return;

	if (vms->clear_ptes)
		return reattach_vmas(mas_detach);

	/*
	 * Aborting cannot just call the vm_ops open() because they are often
	 * not symmetrical and state data has been lost.  Resort to the old
	 * failure method of leaving a gap where the MAP_FIXED mapping failed.
	 */
	mas_set_range(mas, vms->start, vms->end - 1);
	mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL);
	/* Clean up the insertion of the unfortunate gap */
	vms_complete_munmap_vmas(vms, mas_detach);
}
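
/*
 * Sketch of how these helpers are typically combined (illustrative only; the
 * real callers in vma.c and mmap.c also set up the detached maple tree,
 * clear the range in the mm's tree and take locking into account):
 *
 *	MA_STATE(mas_detach, &mt_detach, 0, 0);
 *	struct vma_munmap_struct vms;
 *	int error;
 *
 *	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
 *	error = vms_gather_munmap_vmas(&vms, &mas_detach);
 *	if (error)
 *		goto abort;
 *	// ... e.g. install the replacement mapping for a MAP_FIXED request ...
 *	vms_complete_munmap_vmas(&vms, &mas_detach);
 *	return 0;
 * abort:
 *	vms_abort_munmap_vmas(&vms, &mas_detach);
 *	return error;
 */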

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next);

/* We are about to modify the VMA's flags. */
struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long new_flags);

/* We are about to modify the VMA's flags and/or anon_name. */
struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name);

/* We are about to modify the VMA's memory policy. */
struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol);

/* We are about to modify the VMA's flags and/or uffd context. */
struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx,
		       bool give_up_on_oom);

struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg);

struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);

void unlink_file_vma(struct vm_area_struct *vma);

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually whether we can make individual PTEs
	 * writable when we cannot do so automatically for all PTEs in a
	 * mapping. For private mappings, that is always the case when we have
	 * write permission, as we have to properly handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}
/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
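
/*
 * A minimal sketch of how these classifiers are typically consumed when
 * adjusting the per-mm counters (illustrative only; the accounting helper
 * itself, e.g. vm_stat_account(), lives outside this header):
 *
 *	if (is_exec_mapping(flags))
 *		mm->exec_vm += npages;
 *	else if (is_stack_mapping(flags))
 *		mm->stack_vm += npages;
 *	else if (is_data_mapping(flags))
 *		mm->data_vm += npages;
 */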


static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store_overwrite(struct vma_iterator *vmi,
					    struct vm_area_struct *vma)
{
	vma_assert_attached(vma);

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last <  vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
		       vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline void vma_iter_store_new(struct vma_iterator *vmi,
				      struct vm_area_struct *vma)
{
	vma_mark_attached(vma);
	vma_iter_store_overwrite(vmi, vma);
}

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

/*
 * Retrieve the next VMA and rewind the iterator to the end of the previous
 * VMA, or, if there is no previous VMA, to index 0.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind up
	 * at the next VMA again, so we check to ensure there is a previous VMA
	 * to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	if (pprev)
		*pprev = prev;

	return next;
}
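
/*
 * Typical use (illustrative sketch, not taken from a specific caller): find
 * the first VMA at or above @addr together with its predecessor in one pass:
 *
 *	VMA_ITERATOR(vmi, mm, addr);
 *	struct vm_area_struct *prev, *next;
 *
 *	next = vma_iter_next_rewind(&vmi, &prev);
 *	// next is NULL if no VMA exists at or above addr;
 *	// prev is NULL if addr lies below the first VMA.
 */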

#ifdef CONFIG_64BIT

static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}

/*
 * Check whether a VMA is sealed against modification.
 * Returns true if modification is allowed.
 */
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma_is_sealed(vma)))
		return false;

	return true;
}

bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);
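
/*
 * Callers are expected to check the seal before altering a mapping, roughly
 * along these lines (illustrative sketch only):
 *
 *	if (!can_modify_vma(vma))
 *		return -EPERM;
 *	// ... proceed with modifying (unmapping, remapping, ...) vma ...
 */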

#else

static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	return true;
}

static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
	return true;
}

#endif

#endif	/* __MM_VMA_H */