// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"
#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);
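
/*
 * Illustrative sketch (not part of the original file): a caller can use
 * is_vmalloc_addr() to pick the correct free routine for a buffer that
 * may come from either kmalloc() or vmalloc(). In real code this is
 * exactly what kvfree() does, so prefer kvfree() over open-coding it.
 */
static __maybe_unused void example_free_any(const void *buf)
{
	if (is_vmalloc_addr(buf))
		vfree(buf);
	else
		kfree(buf);
}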

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	unsigned long size = PAGE_SIZE;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));

#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = pte_mkhuge(entry);
			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	prot = pgprot_nx(prot);
	err = vmap_range_noflush(addr, end, phys_addr, prot,
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);

	if (IS_ENABLED(CONFIG_ARCH_HAS_IOREMAP_PHYS_HOOKS) && !err)
		ioremap_phys_range_hook(phys_addr, end - addr, prot);

	return err;
}
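
/*
 * Illustrative sketch (simplified, not the kernel's implementation): the
 * generic ioremap() path is essentially "reserve a VM area, then map the
 * physical range into it with ioremap_page_range()". Error handling and
 * memtype bookkeeping are trimmed; example_ioremap() is a hypothetical
 * name.
 */
static __maybe_unused void __iomem *example_ioremap(phys_addr_t phys,
						    size_t size)
{
	unsigned long offset = offset_in_page(phys);
	struct vm_struct *area;
	unsigned long vaddr;

	/* Map whole pages; remember the sub-page offset. */
	phys -= offset;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, phys,
			       pgprot_noncached(PAGE_KERNEL))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}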

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;
	int cleared;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		cleared = p4d_clear_huge(p4d);
		if (cleared || p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (cleared)
			continue;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vunmap() before calling
 * this function, and flush_tlb_kernel_range() after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
void vunmap_range_noflush(unsigned long start, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}

static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}

/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					__pa(page_address(pages[i])), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);
	return err;
}
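
/*
 * Illustrative sketch (simplified from what vmap() does internally, with
 * flag handling and error paths trimmed): reserve a VM area, then map
 * the caller's pages into it with vmap_pages_range(). example_map_pages()
 * is a hypothetical name; real callers should just use vmap().
 */
static __maybe_unused void *example_map_pages(struct page **pages,
					      unsigned int count)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	struct vm_struct *area;
	unsigned long addr;

	area = get_vm_area(size, VM_MAP);
	if (!area)
		return NULL;

	addr = (unsigned long)area->addr;
	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
			     pages, PAGE_SHIFT) < 0) {
		free_vm_area(area);
		return NULL;
	}

	return area->addr;
}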

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
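
/*
 * Illustrative sketch (not part of the original file): gathering the
 * struct page for each PAGE_SIZE chunk of a vmalloc'ed buffer, e.g. to
 * hand the pages to an I/O path. The function name and parameters are
 * hypothetical; @pages must have room for DIV_ROUND_UP(len, PAGE_SIZE)
 * entries.
 */
static __maybe_unused int example_collect_pages(void *buf, size_t len,
						struct page **pages)
{
	unsigned long addr = (unsigned long)buf;
	unsigned int i, nr = DIV_ROUND_UP(len, PAGE_SIZE);

	for (i = 0; i < nr; i++) {
		pages[i] = vmalloc_to_page((void *)(addr +
					(unsigned long)i * PAGE_SIZE));
		if (!pages[i])
			return -EINVAL;
	}

	return 0;
}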


/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

static struct rb_root purge_vmap_area_root = RB_ROOT;
static LIST_HEAD(purge_vmap_area_list);
static DEFINE_SPINLOCK(purge_vmap_area_lock);

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster, especially for the "no edge" split of
 * a free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains the maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find the lowest match of a free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for the "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

/*
 * Called when a node is removed and the tree is rotated.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static unsigned long lazy_max_pages(void);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}
EXPORT_SYMBOL_GPL(vmalloc_nr_pages);

static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
{
	struct vmap_area *va = NULL;
	struct rb_node *n = vmap_area_root.rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *tmp;

		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_end > addr) {
			va = tmp;
			if (tmp->va_start <= addr)
				break;

			n = n->rb_left;
		} else
			n = n->rb_right;
	}

	return va;
}

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/*
 * This function returns the address of the parent node and of its
 * left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further steps
 * regarding the insertion of a conflicting overlapping range have
 * to be declined; such a range is actually considered a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction
	 * (named "link" here), where the new va->rb_node will be
	 * attached.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do a sanity check:
		 * WARN() and bail out if the ranges overlap, either
		 * on one side (left/right) or fully.
		 */
		if (va->va_start < tmp_va->va_end &&
				va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_end > tmp_va->va_start &&
				va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link, struct list_head *head)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (root == &free_vmap_area_root) {
		/*
		 * Some explanation here. Just perform simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * This is because we populate the tree from the bottom
		 * up to the parent levels only when the node _is_ in
		 * the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything in
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (root == &free_vmap_area_root)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
static void
augment_tree_propagate_check(void)
{
	struct vmap_area *va;
	unsigned long computed_size;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		computed_size = compute_subtree_max_size(va);
		if (computed_size != va->subtree_max_size)
			pr_emerg("tree is corrupted: %lu, %lu\n",
				va_size(va), va->subtree_max_size);
	}
}
#endif

/*
 * This function populates subtree_max_size from the bottom to the
 * upper levels, starting at the VA point. The propagation must be
 * done when the VA size is modified by changing its va_start/va_end,
 * or when a new VA is inserted into the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree(free path);
 * - After VA has been shrunk(allocation path);
 * - After VA has been increased(merging path).
 *
 * Please note that, it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example, if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1, only
 * its subtree_max_size is updated, and set to 1. If we shrink the
 * node 8 to 6, then its subtree_max_size is set to 6 and the parent
 * node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	/*
	 * Populate the tree from the bottom towards the root until
	 * the calculated maximum available size of the checked node
	 * is equal to its current one.
	 */
	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check();
#endif
}

static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	if (link)
		link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	if (link) {
		link_va(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}

/*
 * Merge a de-allocated chunk of VA memory with the previous
 * and next free blocks. If no coalescing is done, a new free
 * area is inserted. If the VA has been merged, it is freed.
 *
 * Please note, it can return NULL in case of overlapping
 * ranges, followed by a WARN() report. Although that is buggy
 * behaviour, the system can stay alive and keep going.
 */
static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);
	if (!link)
		return NULL;

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			/*
			 * If both neighbors are coalesced, it is important
			 * to unlink the "next" node first, followed by merging
			 * with "previous" one. Otherwise the tree might not be
			 * fully populated if a sibling's augmented value is
			 * "normalized" because of rotation operations.
			 */
			if (merged)
				unlink_va(va, root);

			sibling->va_end = va->va_end;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged)
		link_va(va, root, parent, link, head);

	return va;
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	va = merge_or_add_vmap_area(va, root, head);
	if (va)
		augment_tree_propagate_from(va);

	return va;
}

static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can overflow due to a big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}

/*
 * Find the first free block (lowest start address) in the tree
 * that satisfies the request given the passed parameters.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = free_vmap_area_root.rb_node;

	/* Adjust the search size for alignment overhead. */
	length = size + align - 1;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * It does not make sense to go deeper into the right
			 * sub-tree if it does not have a free block that is
			 * at least as big as the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree
			 * that will satisfy the search criteria. It can happen
			 * only once due to the "vstart" restriction.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(unsigned long size)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(size, 1, vstart);
	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif

enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};
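
/*
 * Worked example (illustrative, not part of the original file): take a
 * free vmap_area spanning [0x1000, 0x9000). Allocating [0x1000, 0x9000)
 * is FL_FIT_TYPE (the area is consumed whole), [0x1000, 0x3000) is
 * LE_FIT_TYPE (the left edge moves right), [0x7000, 0x9000) is
 * RE_FIT_TYPE (the right edge moves left), and [0x3000, 0x5000) is
 * NE_FIT_TYPE (the area is split in two, which is why an extra
 * vmap_area object is needed for the remainder).
 */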

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}

static __always_inline int
adjust_va_to_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size,
	enum fit_type type)
{
	struct vmap_area *lva = NULL;

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 *
		 * |               |
		 * V      NVA      V
		 * |---------------|
		 */
		unlink_va(va, &free_vmap_area_root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 *
		 * |       |
		 * V  NVA  V   R
		 * |-------|-------|
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 *
		 *         |       |
		 *     L   V  NVA  V
		 * |-------|-------|
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 *
		 *     |       |
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For the percpu allocator we do not do any
			 * pre-allocation and leave it as it is. The reason
			 * is that it most likely never ends up with
			 * NE_FIT_TYPE splitting. In case of percpu
			 * allocations, offsets and sizes are aligned to a
			 * fixed align request, i.e. RE_FIT_TYPE and
			 * FL_FIT_TYPE are its main fitting cases.
			 *
			 * There are a few exceptions though; one example is
			 * the first allocation (early boot), when we have
			 * "one" big free space that has to be split.
			 *
			 * Also we can hit this path in case of regular "vmap"
			 * allocations, if "this" current CPU was not preloaded.
			 * See the comment in alloc_vmap_area() why. If so, then
			 * GFP_NOWAIT is used instead to get an extra object for
			 * split purposes. That is rare and most of the time
			 * does not occur.
			 *
			 * What happens if an allocation fails? Basically, an
			 * "overflow" path is triggered to purge lazily freed
			 * areas in order to free some memory, then the "retry"
			 * path is triggered to repeat one more time. See more
			 * details in the alloc_vmap_area() function.
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -1;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node,
				&free_vmap_area_root, &free_vmap_area_list);
	}

	return 0;
}

/*
 * Returns the start address of the newly allocated area on success.
 * Otherwise, vend is returned to indicate failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(unsigned long size, unsigned long align,
	unsigned long vstart, unsigned long vend)
{
	unsigned long nva_start_addr;
	struct vmap_area *va;
	enum fit_type type;
	int ret;

	va = find_vmap_lowest_match(size, align, vstart);
	if (unlikely(!va))
		return vend;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return vend;

	/* Classify what we have found. */
	type = classify_va_fit_type(va, nva_start_addr, size);
	if (WARN_ON_ONCE(type == NOTHING_FIT))
		return vend;

	/* Update the free vmap_area. */
	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
	if (ret)
		return vend;

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
	find_vmap_lowest_match_check(size);
#endif

	return nva_start_addr;
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	/*
	 * Remove from the busy tree/list.
	 */
	spin_lock(&vmap_area_lock);
	unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	/*
	 * Insert/Merge it back to the free tree/list.
	 */
	spin_lock(&free_vmap_area_lock);
	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
	spin_unlock(&free_vmap_area_lock);
}

static inline void
preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
{
	struct vmap_area *va = NULL;

	/*
	 * Preload this CPU with one extra vmap_area object. It is used
	 * when the fit type of a free area is NE_FIT_TYPE. It guarantees
	 * that a CPU that does an allocation is preloaded.
	 *
	 * We do it in non-atomic context, thus it allows us to use more
	 * permissive allocation masks to be more stable under low memory
	 * condition and high memory pressure.
	 */
	if (!this_cpu_read(ne_fit_preload_node))
		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);

	spin_lock(lock);

	if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
		kmem_cache_free(vmap_area_cachep, va);
}

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	unsigned long freed;
	unsigned long addr;
	int purged = 0;
	int ret;

	BUG_ON(!size);
	BUG_ON(offset_in_page(size));
	BUG_ON(!is_power_of_2(align));

	if (unlikely(!vmap_initialized))
		return ERR_PTR(-EBUSY);

	might_sleep();
	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;

	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);

retry:
	preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
	addr = __alloc_vmap_area(size, align, vstart, vend);
	spin_unlock(&free_vmap_area_lock);

	/*
	 * If an allocation fails, the "vend" address is
	 * returned. Therefore trigger the overflow path.
	 */
	if (unlikely(addr == vend))
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->vm = NULL;

	spin_lock(&vmap_area_lock);
	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	spin_unlock(&vmap_area_lock);

	BUG_ON(!IS_ALIGNED(va->va_start, align));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	ret = kasan_populate_vmalloc(addr, size);
	if (ret) {
		free_vmap_area(va);
		return ERR_PTR(ret);
	}

	return va;

overflow:
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}

	freed = 0;
	blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);

	if (freed > 0) {
		purged = 0;
		goto retry;
	}

	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
			size);

	kmem_cache_free(vmap_area_cachep, va);
	return ERR_PTR(-EBUSY);
}

int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
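
/*
 * Illustrative sketch (not part of the original file): the shape of a
 * vmap purge notifier. The callback runs from the allocation overflow
 * path above; the void *data argument is the &freed counter that
 * blocking_notifier_call_chain() passes, and bumping it makes
 * alloc_vmap_area() retry. All names here are hypothetical.
 */
static int example_purge_cb(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	unsigned long *freed = data;

	/*
	 * Release any vmap-backed caches this subsystem holds, then add
	 * the number of freed pages (zero here, as a placeholder).
	 */
	*freed += 0;
	return NOTIFY_OK;
}

static __maybe_unused struct notifier_block example_purge_nb = {
	.notifier_call = example_purge_cb,
};
/* Registered via register_vmap_purge_notifier(&example_purge_nb). */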

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
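
/*
 * Worked example (illustrative): with 4 KiB pages, 32UL * 1024 * 1024 /
 * PAGE_SIZE is 8192 pages (32 MiB). On a machine with 16 online CPUs,
 * fls(16) == 5, so up to 5 * 8192 = 40960 pages (160 MiB) of lazily
 * freed space may accumulate before a purge is triggered.
 */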

static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);

/*
 * Serialize vmap purging.  There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

#ifdef CONFIG_X86_64
/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
}
#endif /* CONFIG_X86_64 */

/*
 * Purges all lazily-freed vmap areas.
 */
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
	unsigned long resched_threshold;
	struct list_head local_purge_list;
	struct vmap_area *va, *n_va;

	lockdep_assert_held(&vmap_purge_lock);

	spin_lock(&purge_vmap_area_lock);
	purge_vmap_area_root = RB_ROOT;
	list_replace_init(&purge_vmap_area_list, &local_purge_list);
	spin_unlock(&purge_vmap_area_lock);

	if (unlikely(list_empty(&local_purge_list)))
		return false;

	start = min(start,
		list_first_entry(&local_purge_list,
			struct vmap_area, list)->va_start);

	end = max(end,
		list_last_entry(&local_purge_list,
			struct vmap_area, list)->va_end);

	flush_tlb_kernel_range(start, end);
	resched_threshold = lazy_max_pages() << 1;

	spin_lock(&free_vmap_area_lock);
	list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
		unsigned long orig_start = va->va_start;
		unsigned long orig_end = va->va_end;

		/*
		 * Finally insert or merge lazily-freed area. It is
		 * detached and there is no need to "unlink" it from
		 * anything.
		 */
		va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
				&free_vmap_area_list);

		if (!va)
			continue;

		if (is_vmalloc_or_module_addr((void *)orig_start))
			kasan_release_vmalloc(orig_start, orig_end,
					      va->va_start, va->va_end);

		atomic_long_sub(nr, &vmap_lazy_nr);

		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
			cond_resched_lock(&free_vmap_area_lock);
	}
	spin_unlock(&free_vmap_area_lock);
	return true;
}
1726 
1727 /*
1728  * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
1729  * is already purging.
1730  */
try_purge_vmap_area_lazy(void)1731 static void try_purge_vmap_area_lazy(void)
1732 {
1733 	if (mutex_trylock(&vmap_purge_lock)) {
1734 		__purge_vmap_area_lazy(ULONG_MAX, 0);
1735 		mutex_unlock(&vmap_purge_lock);
1736 	}
1737 }
1738 
1739 /*
1740  * Kick off a purge of the outstanding lazy areas.
1741  */
1742 static void purge_vmap_area_lazy(void)
1743 {
1744 	mutex_lock(&vmap_purge_lock);
1745 	purge_fragmented_blocks_allcpus();
1746 	__purge_vmap_area_lazy(ULONG_MAX, 0);
1747 	mutex_unlock(&vmap_purge_lock);
1748 }
1749 
1750 /*
1751  * Free a vmap area, caller ensuring that the area has been unmapped
1752  * and flush_cache_vunmap had been called for the correct range
1753  * previously.
1754  */
1755 static void free_vmap_area_noflush(struct vmap_area *va)
1756 {
1757 	unsigned long nr_lazy;
1758 
1759 	spin_lock(&vmap_area_lock);
1760 	unlink_va(va, &vmap_area_root);
1761 	spin_unlock(&vmap_area_lock);
1762 
1763 	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1764 				PAGE_SHIFT, &vmap_lazy_nr);
1765 
1766 	/*
1767 	 * Merge or place it to the purge tree/list.
1768 	 */
1769 	spin_lock(&purge_vmap_area_lock);
1770 	merge_or_add_vmap_area(va,
1771 		&purge_vmap_area_root, &purge_vmap_area_list);
1772 	spin_unlock(&purge_vmap_area_lock);
1773 
1774 	/* After this point, we may free va at any time */
1775 	if (unlikely(nr_lazy > lazy_max_pages()))
1776 		try_purge_vmap_area_lazy();
1777 }
1778 
1779 /*
1780  * Free and unmap a vmap area
1781  */
1782 static void free_unmap_vmap_area(struct vmap_area *va)
1783 {
1784 	flush_cache_vunmap(va->va_start, va->va_end);
1785 	vunmap_range_noflush(va->va_start, va->va_end);
1786 	if (debug_pagealloc_enabled_static())
1787 		flush_tlb_kernel_range(va->va_start, va->va_end);
1788 
1789 	free_vmap_area_noflush(va);
1790 }
1791 
1792 static struct vmap_area *find_vmap_area(unsigned long addr)
1793 {
1794 	struct vmap_area *va;
1795 
1796 	spin_lock(&vmap_area_lock);
1797 	va = __find_vmap_area(addr);
1798 	spin_unlock(&vmap_area_lock);
1799 
1800 	return va;
1801 }
1802 
1803 /*** Per cpu kva allocator ***/
1804 
1805 /*
1806  * vmap space is limited especially on 32 bit architectures. Ensure there is
1807  * room for at least 16 vmap blocks per CPU.
1808  */
1809 /*
1810  * If VMALLOC_START and VMALLOC_END were compile-time constants, we could
1811  * simply #define VMALLOC_SPACE	(VMALLOC_END-VMALLOC_START). Since they
1812  * are not, guess instead (we only need a rough idea).
1813  */
1814 #if BITS_PER_LONG == 32
1815 #define VMALLOC_SPACE		(128UL*1024*1024)
1816 #else
1817 #define VMALLOC_SPACE		(128UL*1024*1024*1024)
1818 #endif
1819 
1820 #define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
1821 #define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
1822 #define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
1823 #define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
1824 #define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
1825 #define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
1826 #define VMAP_BBMAP_BITS		\
1827 		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
1828 		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
1829 			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1830 
1831 #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
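
/*
 * Worked example (64-bit, 4 KiB pages, NR_CPUS == 64): VMALLOC_PAGES is
 * 128 GiB / 4 KiB == 33554432 pages, so VMALLOC_PAGES / 64 / 16 ==
 * 32768 bits. VMAP_MAX() keeps the result above VMAP_BBMAP_BITS_MIN
 * (128) and VMAP_MIN() clamps it to VMAP_BBMAP_BITS_MAX (1024), so
 * each vmap block maps 1024 * 4 KiB == 4 MiB of va space.
 */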
1832 
1833 struct vmap_block_queue {
1834 	spinlock_t lock;
1835 	struct list_head free;
1836 };
1837 
1838 struct vmap_block {
1839 	spinlock_t lock;
1840 	struct vmap_area *va;
1841 	unsigned long free, dirty;
1842 	unsigned long dirty_min, dirty_max; /*< dirty range */
1843 	struct list_head free_list;
1844 	struct rcu_head rcu_head;
1845 	struct list_head purge;
1846 };
1847 
1848 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
1849 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1850 
1851 /*
1852  * XArray of vmap blocks, indexed by address, to quickly find a vmap block
1853  * in the free path. Could get rid of this if we change the API to return a
1854  * "cookie" from alloc, to be passed to free. But no big deal yet.
1855  */
1856 static DEFINE_XARRAY(vmap_blocks);
1857 
1858 /*
1859  * We should probably have a fallback mechanism to allocate virtual memory
1860  * out of partially filled vmap blocks. However vmap block sizing should be
1861  * fairly reasonable according to the vmalloc size, so it shouldn't be a
1862  * big problem.
1863  */
1864 
1865 static unsigned long addr_to_vb_idx(unsigned long addr)
1866 {
1867 	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
1868 	addr /= VMAP_BLOCK_SIZE;
1869 	return addr;
1870 }
1871 
1872 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
1873 {
1874 	unsigned long addr;
1875 
1876 	addr = va_start + (pages_off << PAGE_SHIFT);
1877 	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
1878 	return (void *)addr;
1879 }
1880 
1881 /**
1882  * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in it
1883  *                  (the number of pages, of course, can't exceed VMAP_BBMAP_BITS)
1884  * @order:    allocate 2^order pages within the newly allocated block
1885  * @gfp_mask: flags for the page level allocator
1886  *
1887  * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1888  */
1889 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
1890 {
1891 	struct vmap_block_queue *vbq;
1892 	struct vmap_block *vb;
1893 	struct vmap_area *va;
1894 	unsigned long vb_idx;
1895 	int node, err;
1896 	void *vaddr;
1897 
1898 	node = numa_node_id();
1899 
1900 	vb = kmalloc_node(sizeof(struct vmap_block),
1901 			gfp_mask & GFP_RECLAIM_MASK, node);
1902 	if (unlikely(!vb))
1903 		return ERR_PTR(-ENOMEM);
1904 
1905 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1906 					VMALLOC_START, VMALLOC_END,
1907 					node, gfp_mask);
1908 	if (IS_ERR(va)) {
1909 		kfree(vb);
1910 		return ERR_CAST(va);
1911 	}
1912 
1913 	vaddr = vmap_block_vaddr(va->va_start, 0);
1914 	spin_lock_init(&vb->lock);
1915 	vb->va = va;
1916 	/* At least something should be left free */
1917 	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
1918 	vb->free = VMAP_BBMAP_BITS - (1UL << order);
1919 	vb->dirty = 0;
1920 	vb->dirty_min = VMAP_BBMAP_BITS;
1921 	vb->dirty_max = 0;
1922 	INIT_LIST_HEAD(&vb->free_list);
1923 
1924 	vb_idx = addr_to_vb_idx(va->va_start);
1925 	err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
1926 	if (err) {
1927 		kfree(vb);
1928 		free_vmap_area(va);
1929 		return ERR_PTR(err);
1930 	}
1931 
1932 	vbq = &get_cpu_var(vmap_block_queue);
1933 	spin_lock(&vbq->lock);
1934 	list_add_tail_rcu(&vb->free_list, &vbq->free);
1935 	spin_unlock(&vbq->lock);
1936 	put_cpu_var(vmap_block_queue);
1937 
1938 	return vaddr;
1939 }
1940 
1941 static void free_vmap_block(struct vmap_block *vb)
1942 {
1943 	struct vmap_block *tmp;
1944 
1945 	tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
1946 	BUG_ON(tmp != vb);
1947 
1948 	free_vmap_area_noflush(vb->va);
1949 	kfree_rcu(vb, rcu_head);
1950 }
1951 
1952 static void purge_fragmented_blocks(int cpu)
1953 {
1954 	LIST_HEAD(purge);
1955 	struct vmap_block *vb;
1956 	struct vmap_block *n_vb;
1957 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1958 
1959 	rcu_read_lock();
1960 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1961 
1962 		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
1963 			continue;
1964 
1965 		spin_lock(&vb->lock);
1966 		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
1967 			vb->free = 0; /* prevent further allocs after releasing lock */
1968 			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
1969 			vb->dirty_min = 0;
1970 			vb->dirty_max = VMAP_BBMAP_BITS;
1971 			spin_lock(&vbq->lock);
1972 			list_del_rcu(&vb->free_list);
1973 			spin_unlock(&vbq->lock);
1974 			spin_unlock(&vb->lock);
1975 			list_add_tail(&vb->purge, &purge);
1976 		} else
1977 			spin_unlock(&vb->lock);
1978 	}
1979 	rcu_read_unlock();
1980 
1981 	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
1982 		list_del(&vb->purge);
1983 		free_vmap_block(vb);
1984 	}
1985 }
1986 
1987 static void purge_fragmented_blocks_allcpus(void)
1988 {
1989 	int cpu;
1990 
1991 	for_each_possible_cpu(cpu)
1992 		purge_fragmented_blocks(cpu);
1993 }
1994 
1995 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
1996 {
1997 	struct vmap_block_queue *vbq;
1998 	struct vmap_block *vb;
1999 	void *vaddr = NULL;
2000 	unsigned int order;
2001 
2002 	BUG_ON(offset_in_page(size));
2003 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2004 	if (WARN_ON(size == 0)) {
2005 		/*
2006 		 * Allocating 0 bytes isn't what the caller wants, since
2007 		 * get_order(0) returns a funny result. Just warn and bail
2008 		 * out early.
2009 		 */
2010 		return NULL;
2011 	}
2012 	order = get_order(size);
2013 
2014 	rcu_read_lock();
2015 	vbq = &get_cpu_var(vmap_block_queue);
2016 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2017 		unsigned long pages_off;
2018 
2019 		spin_lock(&vb->lock);
2020 		if (vb->free < (1UL << order)) {
2021 			spin_unlock(&vb->lock);
2022 			continue;
2023 		}
2024 
2025 		pages_off = VMAP_BBMAP_BITS - vb->free;
2026 		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
2027 		vb->free -= 1UL << order;
2028 		if (vb->free == 0) {
2029 			spin_lock(&vbq->lock);
2030 			list_del_rcu(&vb->free_list);
2031 			spin_unlock(&vbq->lock);
2032 		}
2033 
2034 		spin_unlock(&vb->lock);
2035 		break;
2036 	}
2037 
2038 	put_cpu_var(vmap_block_queue);
2039 	rcu_read_unlock();
2040 
2041 	/* Allocate new block if nothing was found */
2042 	if (!vaddr)
2043 		vaddr = new_vmap_block(order, gfp_mask);
2044 
2045 	return vaddr;
2046 }
2047 
2048 static void vb_free(unsigned long addr, unsigned long size)
2049 {
2050 	unsigned long offset;
2051 	unsigned int order;
2052 	struct vmap_block *vb;
2053 
2054 	BUG_ON(offset_in_page(size));
2055 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2056 
2057 	flush_cache_vunmap(addr, addr + size);
2058 
2059 	order = get_order(size);
2060 	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
2061 	vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
2062 
2063 	vunmap_range_noflush(addr, addr + size);
2064 
2065 	if (debug_pagealloc_enabled_static())
2066 		flush_tlb_kernel_range(addr, addr + size);
2067 
2068 	spin_lock(&vb->lock);
2069 
2070 	/* Expand dirty range */
2071 	vb->dirty_min = min(vb->dirty_min, offset);
2072 	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2073 
2074 	vb->dirty += 1UL << order;
2075 	if (vb->dirty == VMAP_BBMAP_BITS) {
2076 		BUG_ON(vb->free);
2077 		spin_unlock(&vb->lock);
2078 		free_vmap_block(vb);
2079 	} else
2080 		spin_unlock(&vb->lock);
2081 }
2082 
2083 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2084 {
2085 	int cpu;
2086 
2087 	if (unlikely(!vmap_initialized))
2088 		return;
2089 
2090 	might_sleep();
2091 
2092 	for_each_possible_cpu(cpu) {
2093 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2094 		struct vmap_block *vb;
2095 
2096 		rcu_read_lock();
2097 		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2098 			spin_lock(&vb->lock);
2099 			if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
2100 				unsigned long va_start = vb->va->va_start;
2101 				unsigned long s, e;
2102 
2103 				s = va_start + (vb->dirty_min << PAGE_SHIFT);
2104 				e = va_start + (vb->dirty_max << PAGE_SHIFT);
2105 
2106 				start = min(s, start);
2107 				end   = max(e, end);
2108 
2109 				flush = 1;
2110 			}
2111 			spin_unlock(&vb->lock);
2112 		}
2113 		rcu_read_unlock();
2114 	}
2115 
2116 	mutex_lock(&vmap_purge_lock);
2117 	purge_fragmented_blocks_allcpus();
2118 	if (!__purge_vmap_area_lazy(start, end) && flush)
2119 		flush_tlb_kernel_range(start, end);
2120 	mutex_unlock(&vmap_purge_lock);
2121 }
2122 
2123 /**
2124  * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2125  *
2126  * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2127  * to amortize TLB flushing overheads. What this means is that any page you
2128  * have now may, in a former life, have been mapped into a kernel virtual
2129  * address by the vmap layer, and so there might be some CPUs with TLB entries
2130  * still referencing that page (additional to the regular 1:1 kernel mapping).
2131  *
2132  * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2133  * be sure that none of the pages we have control over will have any aliases
2134  * from the vmap layer.
2135  */
2136 void vm_unmap_aliases(void)
2137 {
2138 	unsigned long start = ULONG_MAX, end = 0;
2139 	int flush = 0;
2140 
2141 	_vm_unmap_aliases(start, end, flush);
2142 }
2143 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
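
/*
 * Usage sketch (illustrative only, hence the #if 0; example_*() is a
 * hypothetical caller): flush lazy vmap aliases before changing page
 * attributes, so no CPU keeps a stale TLB entry for an old mapping of
 * the pages. set_memory_uc() is an arch-specific helper from
 * <linux/set_memory.h>.
 */
#if 0
static int example_make_uncached(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	/* Drop any lazily-kept kernel aliases of these pages first. */
	vm_unmap_aliases();

	return set_memory_uc(addr, numpages);
}
#endif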
2144 
2145 /**
2146  * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2147  * @mem: the pointer returned by vm_map_ram
2148  * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2149  */
2150 void vm_unmap_ram(const void *mem, unsigned int count)
2151 {
2152 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
2153 	unsigned long addr = (unsigned long)kasan_reset_tag(mem);
2154 	struct vmap_area *va;
2155 
2156 	might_sleep();
2157 	BUG_ON(!addr);
2158 	BUG_ON(addr < VMALLOC_START);
2159 	BUG_ON(addr > VMALLOC_END);
2160 	BUG_ON(!PAGE_ALIGNED(addr));
2161 
2162 	kasan_poison_vmalloc(mem, size);
2163 
2164 	if (likely(count <= VMAP_MAX_ALLOC)) {
2165 		debug_check_no_locks_freed(mem, size);
2166 		vb_free(addr, size);
2167 		return;
2168 	}
2169 
2170 	va = find_vmap_area(addr);
2171 	BUG_ON(!va);
2172 	debug_check_no_locks_freed((void *)va->va_start,
2173 				    (va->va_end - va->va_start));
2174 	free_unmap_vmap_area(va);
2175 }
2176 EXPORT_SYMBOL(vm_unmap_ram);
2177 
2178 /**
2179  * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2180  * @pages: an array of pointers to the pages to be mapped
2181  * @count: number of pages
2182  * @node: prefer to allocate data structures on this node
2183  *
2184  * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
2185  * faster than vmap().  But if you mix long-lived and short-lived
2186  * objects with vm_map_ram(), it can consume lots of address space through
2187  * fragmentation (especially on a 32 bit machine), and you may eventually
2188  * see allocation failures.  Please use this function only for short-lived objects.
2189  *
2190  * Returns: a pointer to the address that has been mapped, or %NULL on failure
2191  */
2192 void *vm_map_ram(struct page **pages, unsigned int count, int node)
2193 {
2194 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
2195 	unsigned long addr;
2196 	void *mem;
2197 
2198 	if (likely(count <= VMAP_MAX_ALLOC)) {
2199 		mem = vb_alloc(size, GFP_KERNEL);
2200 		if (IS_ERR(mem))
2201 			return NULL;
2202 		addr = (unsigned long)mem;
2203 	} else {
2204 		struct vmap_area *va;
2205 		va = alloc_vmap_area(size, PAGE_SIZE,
2206 				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
2207 		if (IS_ERR(va))
2208 			return NULL;
2209 
2210 		addr = va->va_start;
2211 		mem = (void *)addr;
2212 	}
2213 
2214 	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
2215 				pages, PAGE_SHIFT) < 0) {
2216 		vm_unmap_ram(mem, count);
2217 		return NULL;
2218 	}
2219 
2220 	/*
2221 	 * Mark the pages as accessible, now that they are mapped.
2222 	 * With hardware tag-based KASAN, marking is skipped for
2223 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
2224 	 */
2225 	mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
2226 
2227 	return mem;
2228 }
2229 EXPORT_SYMBOL(vm_map_ram);
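
/*
 * Usage sketch (illustrative only; example_*() is hypothetical): map a
 * small, short-lived scatter of pages, touch the mapping, then unmap
 * with the same count that was mapped.
 */
#if 0
static void example_map_ram(struct page **pages, unsigned int count)
{
	void *va = vm_map_ram(pages, count, NUMA_NO_NODE);

	if (!va)
		return;

	memset(va, 0, (size_t)count << PAGE_SHIFT);	/* use the mapping */
	vm_unmap_ram(va, count);	/* must pass the original count */
}
#endif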
2230 
2231 static struct vm_struct *vmlist __initdata;
2232 
2233 static inline unsigned int vm_area_page_order(struct vm_struct *vm)
2234 {
2235 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2236 	return vm->page_order;
2237 #else
2238 	return 0;
2239 #endif
2240 }
2241 
2242 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
2243 {
2244 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2245 	vm->page_order = order;
2246 #else
2247 	BUG_ON(order != 0);
2248 #endif
2249 }
2250 
2251 /**
2252  * vm_area_add_early - add vmap area early during boot
2253  * @vm: vm_struct to add
2254  *
2255  * This function is used to add a fixed kernel vm area to vmlist before
2256  * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
2257  * should contain proper values and the other fields should be zero.
2258  *
2259  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2260  */
2261 void __init vm_area_add_early(struct vm_struct *vm)
2262 {
2263 	struct vm_struct *tmp, **p;
2264 
2265 	BUG_ON(vmap_initialized);
2266 	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
2267 		if (tmp->addr >= vm->addr) {
2268 			BUG_ON(tmp->addr < vm->addr + vm->size);
2269 			break;
2270 		} else
2271 			BUG_ON(tmp->addr + tmp->size > vm->addr);
2272 	}
2273 	vm->next = *p;
2274 	*p = vm;
2275 }
2276 
2277 /**
2278  * vm_area_register_early - register vmap area early during boot
2279  * @vm: vm_struct to register
2280  * @align: requested alignment
2281  *
2282  * This function is used to register kernel vm area before
2283  * vmalloc_init() is called.  @vm->size and @vm->flags should contain
2284  * proper values on entry and other fields should be zero.  On return,
2285  * vm->addr contains the allocated address.
2286  *
2287  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2288  */
2289 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
2290 {
2291 	static size_t vm_init_off __initdata;
2292 	unsigned long addr;
2293 
2294 	addr = ALIGN(VMALLOC_START + vm_init_off, align);
2295 	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
2296 
2297 	vm->addr = (void *)addr;
2298 
2299 	vm_area_add_early(vm);
2300 }
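
/*
 * Usage sketch (illustrative only; example_*() is hypothetical): the
 * real users are arch setup paths (e.g. the percpu first-chunk code)
 * that need fixed kernel va space before vmalloc_init() runs.
 */
#if 0
static struct vm_struct example_early_vm;

static void __init example_reserve_early(void)
{
	example_early_vm.size = 1UL << 20;	/* size and flags must be set */
	example_early_vm.flags = VM_ALLOC;
	vm_area_register_early(&example_early_vm, PAGE_SIZE);
	/* example_early_vm.addr now holds the reserved address. */
}
#endif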
2301 
2302 static void vmap_init_free_space(void)
2303 {
2304 	unsigned long vmap_start = 1;
2305 	const unsigned long vmap_end = ULONG_MAX;
2306 	struct vmap_area *busy, *free;
2307 
2308 	/*
2309 	 *     B     F     B     B     B     F
2310 	 * -|-----|.....|-----|-----|-----|.....|-
2311 	 *  |           The KVA space           |
2312 	 *  |<--------------------------------->|
2313 	 */
2314 	list_for_each_entry(busy, &vmap_area_list, list) {
2315 		if (busy->va_start - vmap_start > 0) {
2316 			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2317 			if (!WARN_ON_ONCE(!free)) {
2318 				free->va_start = vmap_start;
2319 				free->va_end = busy->va_start;
2320 
2321 				insert_vmap_area_augment(free, NULL,
2322 					&free_vmap_area_root,
2323 						&free_vmap_area_list);
2324 			}
2325 		}
2326 
2327 		vmap_start = busy->va_end;
2328 	}
2329 
2330 	if (vmap_end - vmap_start > 0) {
2331 		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2332 		if (!WARN_ON_ONCE(!free)) {
2333 			free->va_start = vmap_start;
2334 			free->va_end = vmap_end;
2335 
2336 			insert_vmap_area_augment(free, NULL,
2337 				&free_vmap_area_root,
2338 					&free_vmap_area_list);
2339 		}
2340 	}
2341 }
2342 
2343 void __init vmalloc_init(void)
2344 {
2345 	struct vmap_area *va;
2346 	struct vm_struct *tmp;
2347 	int i;
2348 
2349 	/*
2350 	 * Create the cache for vmap_area objects.
2351 	 */
2352 	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
2353 
2354 	for_each_possible_cpu(i) {
2355 		struct vmap_block_queue *vbq;
2356 		struct vfree_deferred *p;
2357 
2358 		vbq = &per_cpu(vmap_block_queue, i);
2359 		spin_lock_init(&vbq->lock);
2360 		INIT_LIST_HEAD(&vbq->free);
2361 		p = &per_cpu(vfree_deferred, i);
2362 		init_llist_head(&p->list);
2363 		INIT_WORK(&p->wq, free_work);
2364 	}
2365 
2366 	/* Import existing vmlist entries. */
2367 	for (tmp = vmlist; tmp; tmp = tmp->next) {
2368 		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2369 		if (WARN_ON_ONCE(!va))
2370 			continue;
2371 
2372 		va->va_start = (unsigned long)tmp->addr;
2373 		va->va_end = va->va_start + tmp->size;
2374 		va->vm = tmp;
2375 		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
2376 	}
2377 
2378 	/*
2379 	 * Now we can initialize a free vmap space.
2380 	 */
2381 	vmap_init_free_space();
2382 	vmap_initialized = true;
2383 }
2384 
2385 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2386 	struct vmap_area *va, unsigned long flags, const void *caller)
2387 {
2388 	vm->flags = flags;
2389 	vm->addr = (void *)va->va_start;
2390 	vm->size = va->va_end - va->va_start;
2391 	vm->caller = caller;
2392 	va->vm = vm;
2393 }
2394 
2395 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2396 			      unsigned long flags, const void *caller)
2397 {
2398 	spin_lock(&vmap_area_lock);
2399 	setup_vmalloc_vm_locked(vm, va, flags, caller);
2400 	spin_unlock(&vmap_area_lock);
2401 }
2402 
2403 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2404 {
2405 	/*
2406 	 * Before removing VM_UNINITIALIZED,
2407 	 * we should make sure that vm has proper values.
2408 	 * Pair with smp_rmb() in show_numa_info().
2409 	 */
2410 	smp_wmb();
2411 	vm->flags &= ~VM_UNINITIALIZED;
2412 }
2413 
2414 static struct vm_struct *__get_vm_area_node(unsigned long size,
2415 		unsigned long align, unsigned long shift, unsigned long flags,
2416 		unsigned long start, unsigned long end, int node,
2417 		gfp_t gfp_mask, const void *caller)
2418 {
2419 	struct vmap_area *va;
2420 	struct vm_struct *area;
2421 	unsigned long requested_size = size;
2422 
2423 	BUG_ON(in_interrupt());
2424 	size = ALIGN(size, 1ul << shift);
2425 	if (unlikely(!size))
2426 		return NULL;
2427 
2428 	if (flags & VM_IOREMAP)
2429 		align = 1ul << clamp_t(int, get_count_order_long(size),
2430 				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
2431 
2432 	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2433 	if (unlikely(!area))
2434 		return NULL;
2435 
2436 	if (!(flags & VM_NO_GUARD))
2437 		size += PAGE_SIZE;
2438 
2439 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2440 	if (IS_ERR(va)) {
2441 		kfree(area);
2442 		return NULL;
2443 	}
2444 
2445 	setup_vmalloc_vm(area, va, flags, caller);
2446 
2447 	/*
2448 	 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
2449 	 * best-effort approach, as they can be mapped outside of vmalloc code.
2450 	 * For VM_ALLOC mappings, the pages are marked as accessible after
2451 	 * getting mapped in __vmalloc_node_range().
2452 	 * With hardware tag-based KASAN, marking is skipped for
2453 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
2454 	 */
2455 	if (!(flags & VM_ALLOC))
2456 		area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
2457 						    KASAN_VMALLOC_PROT_NORMAL);
2458 
2459 	return area;
2460 }
2461 
2462 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2463 				       unsigned long start, unsigned long end,
2464 				       const void *caller)
2465 {
2466 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
2467 				  NUMA_NO_NODE, GFP_KERNEL, caller);
2468 }
2469 
2470 /**
2471  * get_vm_area - reserve a contiguous kernel virtual area
2472  * @size:	 size of the area
2473  * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
2474  *
2475  * Search for an area of @size in the kernel virtual mapping area,
2476  * and reserve it for our purposes.  Returns the area descriptor
2477  * on success or %NULL on failure.
2478  *
2479  * Return: the area descriptor on success or %NULL on failure.
2480  */
2481 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2482 {
2483 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2484 				  VMALLOC_START, VMALLOC_END,
2485 				  NUMA_NO_NODE, GFP_KERNEL,
2486 				  __builtin_return_address(0));
2487 }
2488 
2489 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2490 				const void *caller)
2491 {
2492 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2493 				  VMALLOC_START, VMALLOC_END,
2494 				  NUMA_NO_NODE, GFP_KERNEL, caller);
2495 }
2496 
2497 /**
2498  * find_vm_area - find a contiguous kernel virtual area
2499  * @addr:	  base address
2500  *
2501  * Search for the kernel VM area starting at @addr, and return it.
2502  * It is up to the caller to do all required locking to keep the returned
2503  * pointer valid.
2504  *
2505  * Return: the area descriptor on success or %NULL on failure.
2506  */
2507 struct vm_struct *find_vm_area(const void *addr)
2508 {
2509 	struct vmap_area *va;
2510 
2511 	va = find_vmap_area((unsigned long)addr);
2512 	if (!va)
2513 		return NULL;
2514 
2515 	return va->vm;
2516 }
2517 EXPORT_SYMBOL_GPL(find_vm_area);
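
/*
 * Usage sketch (illustrative only; example_*() is hypothetical):
 * reserve bare kernel va space with no backing pages, look it up
 * again by address, then release the reservation.
 */
#if 0
static void example_reserve_area(void)
{
	struct vm_struct *area = get_vm_area(16 * PAGE_SIZE, VM_IOREMAP);

	if (!area)
		return;

	WARN_ON(find_vm_area(area->addr) != area);
	free_vm_area(area);	/* drops the reservation again */
}
#endif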
2518 
2519 /**
2520  * remove_vm_area - find and remove a contiguous kernel virtual area
2521  * @addr:	    base address
2522  *
2523  * Search for the kernel VM area starting at @addr, and remove it.
2524  * This function returns the found VM area, but using it is NOT safe
2525  * on SMP machines, except for its size or flags.
2526  *
2527  * Return: the area descriptor on success or %NULL on failure.
2528  */
2529 struct vm_struct *remove_vm_area(const void *addr)
2530 {
2531 	struct vmap_area *va;
2532 
2533 	might_sleep();
2534 
2535 	spin_lock(&vmap_area_lock);
2536 	va = __find_vmap_area((unsigned long)addr);
2537 	if (va && va->vm) {
2538 		struct vm_struct *vm = va->vm;
2539 
2540 		va->vm = NULL;
2541 		spin_unlock(&vmap_area_lock);
2542 
2543 		kasan_free_module_shadow(vm);
2544 		free_unmap_vmap_area(va);
2545 
2546 		return vm;
2547 	}
2548 
2549 	spin_unlock(&vmap_area_lock);
2550 	return NULL;
2551 }
2552 
2553 static inline void set_area_direct_map(const struct vm_struct *area,
2554 				       int (*set_direct_map)(struct page *page))
2555 {
2556 	int i;
2557 
2558 	/* HUGE_VMALLOC passes small pages to set_direct_map */
2559 	for (i = 0; i < area->nr_pages; i++)
2560 		if (page_address(area->pages[i]))
2561 			set_direct_map(area->pages[i]);
2562 }
2563 
2564 /* Handle removing and resetting vm mappings related to the vm_struct. */
2565 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2566 {
2567 	unsigned long start = ULONG_MAX, end = 0;
2568 	unsigned int page_order = vm_area_page_order(area);
2569 	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2570 	int flush_dmap = 0;
2571 	int i;
2572 
2573 	remove_vm_area(area->addr);
2574 
2575 	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2576 	if (!flush_reset)
2577 		return;
2578 
2579 	/*
2580 	 * If not deallocating pages, just do the flush of the VM area and
2581 	 * return.
2582 	 */
2583 	if (!deallocate_pages) {
2584 		vm_unmap_aliases();
2585 		return;
2586 	}
2587 
2588 	/*
2589 	 * If execution gets here, flush the vm mapping and reset the direct
2590 	 * map. Find the start and end range of the direct mappings to make sure
2591 	 * the vm_unmap_aliases() flush includes the direct map.
2592 	 */
2593 	for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2594 		unsigned long addr = (unsigned long)page_address(area->pages[i]);
2595 		if (addr) {
2596 			unsigned long page_size;
2597 
2598 			page_size = PAGE_SIZE << page_order;
2599 			start = min(addr, start);
2600 			end = max(addr + page_size, end);
2601 			flush_dmap = 1;
2602 		}
2603 	}
2604 
2605 	/*
2606 	 * Set direct map to something invalid so that it won't be cached if
2607 	 * there are any accesses after the TLB flush, then flush the TLB and
2608 	 * reset the direct map permissions to the default.
2609 	 */
2610 	set_area_direct_map(area, set_direct_map_invalid_noflush);
2611 	_vm_unmap_aliases(start, end, flush_dmap);
2612 	set_area_direct_map(area, set_direct_map_default_noflush);
2613 }
2614 
2615 static void __vunmap(const void *addr, int deallocate_pages)
2616 {
2617 	struct vm_struct *area;
2618 
2619 	if (!addr)
2620 		return;
2621 
2622 	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2623 			addr))
2624 		return;
2625 
2626 	area = find_vm_area(addr);
2627 	if (unlikely(!area)) {
2628 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2629 				addr);
2630 		return;
2631 	}
2632 
2633 	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2634 	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2635 
2636 	kasan_poison_vmalloc(area->addr, get_vm_area_size(area));
2637 
2638 	if (IS_ENABLED(CONFIG_ARCH_HAS_IOREMAP_PHYS_HOOKS) &&
2639 	    area->flags & VM_IOREMAP)
2640 		iounmap_phys_range_hook(area->phys_addr, get_vm_area_size(area));
2641 
2642 	vm_remove_mappings(area, deallocate_pages);
2643 
2644 	if (deallocate_pages) {
2645 		unsigned int page_order = vm_area_page_order(area);
2646 		int i;
2647 
2648 		for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2649 			struct page *page = area->pages[i];
2650 
2651 			BUG_ON(!page);
2652 			__free_pages(page, page_order);
2653 			cond_resched();
2654 		}
2655 		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
2656 
2657 		kvfree(area->pages);
2658 	}
2659 
2660 	kfree(area);
2661 }
2662 
2663 static inline void __vfree_deferred(const void *addr)
2664 {
2665 	/*
2666 	 * Use raw_cpu_ptr() because this can be called from preemptible
2667 	 * context. Preemption is absolutely fine here, because the llist_add()
2668 	 * implementation is lockless, so it works even if we are adding to
2669 	 * another cpu's list. schedule_work() should be fine with this too.
2670 	 */
2671 	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2672 
2673 	if (llist_add((struct llist_node *)addr, &p->list))
2674 		schedule_work(&p->wq);
2675 }
2676 
2677 /**
2678  * vfree_atomic - release memory allocated by vmalloc()
2679  * @addr:	  memory base address
2680  *
2681  * This one is just like vfree() but can be called in any atomic context
2682  * except NMIs.
2683  */
2684 void vfree_atomic(const void *addr)
2685 {
2686 	BUG_ON(in_nmi());
2687 
2688 	kmemleak_free(addr);
2689 
2690 	if (!addr)
2691 		return;
2692 	__vfree_deferred(addr);
2693 }
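
/*
 * Usage sketch (illustrative only; example_*() is hypothetical): free
 * a vmalloc'ed buffer while atomic (e.g. under a spinlock), where
 * plain vfree() might sleep.
 */
#if 0
static void example_free_while_atomic(spinlock_t *lock, void *buf)
{
	spin_lock(lock);
	vfree_atomic(buf);	/* defers the actual unmap to a workqueue */
	spin_unlock(lock);
}
#endif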
2694 
2695 static void __vfree(const void *addr)
2696 {
2697 	if (unlikely(in_interrupt()))
2698 		__vfree_deferred(addr);
2699 	else
2700 		__vunmap(addr, 1);
2701 }
2702 
2703 /**
2704  * vfree - Release memory allocated by vmalloc()
2705  * @addr:  Memory base address
2706  *
2707  * Free the virtually continuous memory area starting at @addr, as obtained
2708  * from one of the vmalloc() family of APIs.  This will usually also free the
2709  * physical memory underlying the virtual allocation, but that memory is
2710  * reference counted, so it will not be freed until the last user goes away.
2711  *
2712  * If @addr is NULL, no operation is performed.
2713  *
2714  * Context:
2715  * May sleep if called *not* from interrupt context.
2716  * Must not be called in NMI context (strictly speaking, it could be
2717  * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2718  * conventions for vfree() arch-dependent would be a really bad idea).
2719  */
2720 void vfree(const void *addr)
2721 {
2722 	BUG_ON(in_nmi());
2723 
2724 	kmemleak_free(addr);
2725 
2726 	might_sleep_if(!in_interrupt());
2727 
2728 	if (!addr)
2729 		return;
2730 
2731 	__vfree(addr);
2732 }
2733 EXPORT_SYMBOL(vfree);
2734 
2735 /**
2736  * vunmap - release virtual mapping obtained by vmap()
2737  * @addr:   memory base address
2738  *
2739  * Free the virtually contiguous memory area starting at @addr,
2740  * which was created from the page array passed to vmap().
2741  *
2742  * Must not be called in interrupt context.
2743  */
2744 void vunmap(const void *addr)
2745 {
2746 	BUG_ON(in_interrupt());
2747 	might_sleep();
2748 	if (addr)
2749 		__vunmap(addr, 0);
2750 }
2751 EXPORT_SYMBOL(vunmap);
2752 
2753 /**
2754  * vmap - map an array of pages into virtually contiguous space
2755  * @pages: array of page pointers
2756  * @count: number of pages to map
2757  * @flags: vm_area->flags
2758  * @prot: page protection for the mapping
2759  *
2760  * Maps @count pages from @pages into contiguous kernel virtual space.
2761  * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
2762  * (which must be kmalloc or vmalloc memory) and one reference per page in it
2763  * are transferred from the caller to vmap(), and will be freed / dropped when
2764  * vfree() is called on the return value.
2765  *
2766  * Return: the address of the area or %NULL on failure
2767  */
2768 void *vmap(struct page **pages, unsigned int count,
2769 	   unsigned long flags, pgprot_t prot)
2770 {
2771 	struct vm_struct *area;
2772 	unsigned long addr;
2773 	unsigned long size;		/* In bytes */
2774 
2775 	might_sleep();
2776 
2777 	if (count > totalram_pages())
2778 		return NULL;
2779 
2780 	size = (unsigned long)count << PAGE_SHIFT;
2781 	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2782 	if (!area)
2783 		return NULL;
2784 
2785 	addr = (unsigned long)area->addr;
2786 	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
2787 				pages, PAGE_SHIFT) < 0) {
2788 		vunmap(area->addr);
2789 		return NULL;
2790 	}
2791 
2792 	if (flags & VM_MAP_PUT_PAGES) {
2793 		area->pages = pages;
2794 		area->nr_pages = count;
2795 	}
2796 	return area->addr;
2797 }
2798 EXPORT_SYMBOL(vmap);
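
/*
 * Usage sketch (illustrative only; example_*() is hypothetical): build
 * a virtually contiguous view over two independently allocated pages.
 * Without VM_MAP_PUT_PAGES the caller keeps ownership of the pages and
 * must free them itself after vunmap().
 */
#if 0
static void *example_vmap_two_pages(void)
{
	struct page *pages[2];
	void *va;

	pages[0] = alloc_page(GFP_KERNEL);
	pages[1] = alloc_page(GFP_KERNEL);
	if (!pages[0] || !pages[1])
		goto free_pages;

	va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
	if (!va)
		goto free_pages;

	return va;	/* later: vunmap(va), then __free_page() both pages */

free_pages:
	if (pages[0])
		__free_page(pages[0]);
	if (pages[1])
		__free_page(pages[1]);
	return NULL;
}
#endif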
2799 
2800 #ifdef CONFIG_VMAP_PFN
2801 struct vmap_pfn_data {
2802 	unsigned long	*pfns;
2803 	pgprot_t	prot;
2804 	unsigned int	idx;
2805 };
2806 
2807 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
2808 {
2809 	struct vmap_pfn_data *data = private;
2810 
2811 	if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx])))
2812 		return -EINVAL;
2813 	*pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot));
2814 	return 0;
2815 }
2816 
2817 /**
2818  * vmap_pfn - map an array of PFNs into virtually contiguous space
2819  * @pfns: array of PFNs
2820  * @count: number of pages to map
2821  * @prot: page protection for the mapping
2822  *
2823  * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
2824  * the start address of the mapping.
2825  */
2826 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
2827 {
2828 	struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
2829 	struct vm_struct *area;
2830 
2831 	area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
2832 			__builtin_return_address(0));
2833 	if (!area)
2834 		return NULL;
2835 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2836 			count * PAGE_SIZE, vmap_pfn_apply, &data)) {
2837 		free_vm_area(area);
2838 		return NULL;
2839 	}
2840 
2841 	flush_cache_vmap((unsigned long)area->addr,
2842 			 (unsigned long)area->addr + count * PAGE_SIZE);
2843 
2844 	return area->addr;
2845 }
2846 EXPORT_SYMBOL_GPL(vmap_pfn);
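
/*
 * Usage sketch (illustrative only; example_*() is hypothetical): map
 * two device PFNs that have no struct page (vmap_pfn_apply() above
 * rejects pfn_valid() PFNs). pgprot_noncached() is the usual choice
 * for MMIO-backed PFNs.
 */
#if 0
static void __iomem *example_map_two_pfns(unsigned long pfn0,
					  unsigned long pfn1)
{
	unsigned long pfns[2] = { pfn0, pfn1 };

	return (void __iomem *)vmap_pfn(pfns, 2,
					pgprot_noncached(PAGE_KERNEL));
}
#endif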
2847 #endif /* CONFIG_VMAP_PFN */
2848 
2849 static inline unsigned int
2850 vm_area_alloc_pages(gfp_t gfp, int nid,
2851 		unsigned int order, unsigned int nr_pages, struct page **pages)
2852 {
2853 	unsigned int nr_allocated = 0;
2854 	struct page *page;
2855 	int i;
2856 
2857 	/*
2858 	 * For order-0 pages we make use of the bulk allocator. If
2859 	 * the page array ends up only partly populated, or not
2860 	 * populated at all, due to failures, fall back to the single
2861 	 * page allocator, which is more permissive.
2862 	 */
2863 	if (!order && nid != NUMA_NO_NODE) {
2864 		while (nr_allocated < nr_pages) {
2865 			unsigned int nr, nr_pages_request;
2866 
2867 			/*
2868 			 * The maximum allowed request is hard-coded to 100
2869 			 * pages per call, in order to prevent long
2870 			 * preemption-off sections in the bulk allocator,
2871 			 * so the request range is [1:100].
2872 			 */
2873 			nr_pages_request = min(100U, nr_pages - nr_allocated);
2874 
2875 			nr = alloc_pages_bulk_array_node(gfp, nid,
2876 				nr_pages_request, pages + nr_allocated);
2877 
2878 			nr_allocated += nr;
2879 			cond_resched();
2880 
2881 			/*
2882 			 * If no pages or only some were obtained,
2883 			 * fall back to the single page allocator.
2884 			 */
2885 			if (nr != nr_pages_request)
2886 				break;
2887 		}
2888 	} else if (order)
2889 		/*
2890 		 * Compound pages are required for remap_vmalloc_page when
2891 		 * using high-order pages.
2892 		 */
2893 		gfp |= __GFP_COMP;
2894 
2895 	/* High-order pages or fallback path if "bulk" fails. */
2896 
2897 	while (nr_allocated < nr_pages) {
2898 		if (nid == NUMA_NO_NODE)
2899 			page = alloc_pages(gfp, order);
2900 		else
2901 			page = alloc_pages_node(nid, gfp, order);
2902 		if (unlikely(!page))
2903 			break;
2904 
2905 		/*
2906 		 * Careful, we allocate and map page-order pages, but
2907 		 * tracking is done per PAGE_SIZE page so as to keep the
2908 		 * vm_struct APIs independent of the physical/mapped size.
2909 		 */
2910 		for (i = 0; i < (1U << order); i++)
2911 			pages[nr_allocated + i] = page + i;
2912 
2913 		cond_resched();
2914 		nr_allocated += 1U << order;
2915 	}
2916 
2917 	return nr_allocated;
2918 }
2919 
2920 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
2921 				 pgprot_t prot, unsigned int page_shift,
2922 				 int node)
2923 {
2924 	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
2925 	unsigned long addr = (unsigned long)area->addr;
2926 	unsigned long size = get_vm_area_size(area);
2927 	unsigned long array_size;
2928 	unsigned int nr_small_pages = size >> PAGE_SHIFT;
2929 	unsigned int page_order;
2930 
2931 	array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
2932 	gfp_mask |= __GFP_NOWARN;
2933 	if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
2934 		gfp_mask |= __GFP_HIGHMEM;
2935 
2936 	/* Please note that the recursion is strictly bounded. */
2937 	if (array_size > PAGE_SIZE) {
2938 		area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
2939 					area->caller);
2940 	} else {
2941 		area->pages = kmalloc_node(array_size, nested_gfp, node);
2942 	}
2943 
2944 	if (!area->pages) {
2945 		warn_alloc(gfp_mask, NULL,
2946 			"vmalloc error: size %lu, failed to allocated page array size %lu",
2947 			nr_small_pages * PAGE_SIZE, array_size);
2948 		free_vm_area(area);
2949 		return NULL;
2950 	}
2951 
2952 	set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
2953 	page_order = vm_area_page_order(area);
2954 
2955 	area->nr_pages = vm_area_alloc_pages(gfp_mask, node,
2956 		page_order, nr_small_pages, area->pages);
2957 
2958 	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2959 
2960 	/*
2961 	 * If not enough pages were obtained to accomplish an
2962 	 * allocation request, free any pages that were obtained via __vfree().
2963 	 */
2964 	if (area->nr_pages != nr_small_pages) {
2965 		/* vm_area_alloc_pages() can also fail due to a fatal signal */
2966 		if (!fatal_signal_pending(current))
2967 			warn_alloc(gfp_mask, NULL,
2968 				"vmalloc error: size %lu, page order %u, failed to allocate pages",
2969 				area->nr_pages * PAGE_SIZE, page_order);
2970 		goto fail;
2971 	}
2972 
2973 	if (vmap_pages_range(addr, addr + size, prot, area->pages,
2974 			page_shift) < 0) {
2975 		warn_alloc(gfp_mask, NULL,
2976 			"vmalloc error: size %lu, failed to map pages",
2977 			area->nr_pages * PAGE_SIZE);
2978 		goto fail;
2979 	}
2980 
2981 	return area->addr;
2982 
2983 fail:
2984 	__vfree(area->addr);
2985 	return NULL;
2986 }
2987 
2988 /**
2989  * __vmalloc_node_range - allocate virtually contiguous memory
2990  * @size:		  allocation size
2991  * @align:		  desired alignment
2992  * @start:		  vm area range start
2993  * @end:		  vm area range end
2994  * @gfp_mask:		  flags for the page level allocator
2995  * @prot:		  protection mask for the allocated pages
2996  * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
2997  * @node:		  node to use for allocation or NUMA_NO_NODE
2998  * @caller:		  caller's return address
2999  *
3000  * Allocate enough pages to cover @size from the page level
3001  * allocator with @gfp_mask flags.  Map them into contiguous
3002  * kernel virtual space, using a pagetable protection of @prot.
3003  *
3004  * Return: the address of the area or %NULL on failure
3005  */
3006 void *__vmalloc_node_range(unsigned long size, unsigned long align,
3007 			unsigned long start, unsigned long end, gfp_t gfp_mask,
3008 			pgprot_t prot, unsigned long vm_flags, int node,
3009 			const void *caller)
3010 {
3011 	struct vm_struct *area;
3012 	void *ret;
3013 	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
3014 	unsigned long real_size = size;
3015 	unsigned long real_align = align;
3016 	unsigned int shift = PAGE_SHIFT;
3017 
3018 	if (WARN_ON_ONCE(!size))
3019 		return NULL;
3020 
3021 	if ((size >> PAGE_SHIFT) > totalram_pages()) {
3022 		warn_alloc(gfp_mask, NULL,
3023 			"vmalloc error: size %lu, exceeds total pages",
3024 			real_size);
3025 		return NULL;
3026 	}
3027 
3028 	if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
3029 		unsigned long size_per_node;
3030 
3031 		/*
3032 		 * Try huge pages. Only try for PAGE_KERNEL allocations,
3033 		 * others like modules don't yet expect huge pages in
3034 		 * their allocations due to apply_to_page_range not
3035 		 * supporting them.
3036 		 */
3037 
3038 		size_per_node = size;
3039 		if (node == NUMA_NO_NODE)
3040 			size_per_node /= num_online_nodes();
3041 		if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
3042 			shift = PMD_SHIFT;
3043 		else
3044 			shift = arch_vmap_pte_supported_shift(size_per_node);
3045 
3046 		align = max(real_align, 1UL << shift);
3047 		size = ALIGN(real_size, 1UL << shift);
3048 	}
3049 
3050 again:
3051 	area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
3052 				  VM_UNINITIALIZED | vm_flags, start, end, node,
3053 				  gfp_mask, caller);
3054 	if (!area) {
3055 		warn_alloc(gfp_mask, NULL,
3056 			"vmalloc error: size %lu, vm_struct allocation failed",
3057 			real_size);
3058 		goto fail;
3059 	}
3060 
3061 	/*
3062 	 * Prepare arguments for __vmalloc_area_node() and
3063 	 * kasan_unpoison_vmalloc().
3064 	 */
3065 	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
3066 		if (kasan_hw_tags_enabled()) {
3067 			/*
3068 			 * Modify protection bits to allow tagging.
3069 			 * This must be done before mapping.
3070 			 */
3071 			prot = arch_vmap_pgprot_tagged(prot);
3072 
3073 			/*
3074 			 * Skip page_alloc poisoning and zeroing for physical
3075 			 * pages backing VM_ALLOC mapping. Memory is instead
3076 			 * poisoned and zeroed by kasan_unpoison_vmalloc().
3077 			 */
3078 			gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO;
3079 		}
3080 
3081 		/* Take note that the mapping is PAGE_KERNEL. */
3082 		kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
3083 	}
3084 
3085 	/* Allocate physical pages and map them into vmalloc space. */
3086 	ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
3087 	if (!ret)
3088 		goto fail;
3089 
3090 	/*
3091 	 * Mark the pages as accessible, now that they are mapped.
3092 	 * The init condition should match the one in post_alloc_hook()
3093 	 * (except for the should_skip_init() check) to make sure that memory
3094 	 * is initialized under the same conditions regardless of the enabled
3095 	 * KASAN mode.
3096 	 * Tag-based KASAN modes only assign tags to normal non-executable
3097 	 * allocations, see __kasan_unpoison_vmalloc().
3098 	 */
3099 	kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
3100 	if (!want_init_on_free() && want_init_on_alloc(gfp_mask))
3101 		kasan_flags |= KASAN_VMALLOC_INIT;
3102 	/* KASAN_VMALLOC_PROT_NORMAL already set if required. */
3103 	area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
3104 
3105 	/*
3106 	 * The newly allocated vm_struct carries the VM_UNINITIALIZED
3107 	 * flag, meaning that it is not yet fully initialized.
3108 	 * At this point it is, so remove the flag here.
3109 	 */
3110 	clear_vm_uninitialized_flag(area);
3111 
3112 	size = PAGE_ALIGN(size);
3113 	if (!(vm_flags & VM_DEFER_KMEMLEAK))
3114 		kmemleak_vmalloc(area, size, gfp_mask);
3115 
3116 	return area->addr;
3117 
3118 fail:
3119 	if (shift > PAGE_SHIFT) {
3120 		shift = PAGE_SHIFT;
3121 		align = real_align;
3122 		size = real_size;
3123 		goto again;
3124 	}
3125 
3126 	return NULL;
3127 }
3128 
3129 /**
3130  * __vmalloc_node - allocate virtually contiguous memory
3131  * @size:	    allocation size
3132  * @align:	    desired alignment
3133  * @gfp_mask:	    flags for the page level allocator
3134  * @node:	    node to use for allocation or NUMA_NO_NODE
3135  * @caller:	    caller's return address
3136  *
3137  * Allocate enough pages to cover @size from the page level allocator with
3138  * @gfp_mask flags.  Map them into contiguous kernel virtual space.
3139  *
3140  * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3141  * and __GFP_NOFAIL - are not supported.
3142  *
3143  * Any use of gfp flags outside of GFP_KERNEL should be discussed
3144  * with the mm people first.
3145  *
3146  * Return: pointer to the allocated memory or %NULL on error
3147  */
3148 void *__vmalloc_node(unsigned long size, unsigned long align,
3149 			    gfp_t gfp_mask, int node, const void *caller)
3150 {
3151 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
3152 				gfp_mask, PAGE_KERNEL, 0, node, caller);
3153 }
3154 /*
3155  * This is exported only for performance analysis and stress testing of
3156  * vmalloc. It is required by the vmalloc test module; do not use it for
3157  * anything else.
3158  */
3159 #ifdef CONFIG_TEST_VMALLOC_MODULE
3160 EXPORT_SYMBOL_GPL(__vmalloc_node);
3161 #endif
3162 
3163 void *__vmalloc(unsigned long size, gfp_t gfp_mask)
3164 {
3165 	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
3166 				__builtin_return_address(0));
3167 }
3168 EXPORT_SYMBOL(__vmalloc);
3169 
3170 /**
3171  * vmalloc - allocate virtually contiguous memory
3172  * @size:    allocation size
3173  *
3174  * Allocate enough pages to cover @size from the page level
3175  * allocator and map them into contiguous kernel virtual space.
3176  *
3177  * For tight control over page level allocator and protection flags
3178  * use __vmalloc() instead.
3179  *
3180  * Return: pointer to the allocated memory or %NULL on error
3181  */
3182 void *vmalloc(unsigned long size)
3183 {
3184 	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
3185 				__builtin_return_address(0));
3186 }
3187 EXPORT_SYMBOL(vmalloc);
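
/*
 * Usage sketch (illustrative only; example_*() is hypothetical): the
 * classic pattern, a buffer too large for kmalloc() that only needs
 * to be virtually contiguous.
 */
#if 0
static int example_big_table(void)
{
	u32 *table = vmalloc(4UL << 20);	/* 4 MiB */

	if (!table)
		return -ENOMEM;

	table[0] = 1;	/* ... use the table ... */
	vfree(table);
	return 0;
}
#endif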
3188 
3189 /**
3190  * vmalloc_no_huge - allocate virtually contiguous memory using small pages
3191  * @size:    allocation size
3192  *
3193  * Allocate enough non-huge pages to cover @size from the page level
3194  * allocator and map them into contiguous kernel virtual space.
3195  *
3196  * Return: pointer to the allocated memory or %NULL on error
3197  */
3198 void *vmalloc_no_huge(unsigned long size)
3199 {
3200 	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
3201 				    GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP,
3202 				    NUMA_NO_NODE, __builtin_return_address(0));
3203 }
3204 EXPORT_SYMBOL(vmalloc_no_huge);
3205 
3206 /**
3207  * vzalloc - allocate virtually contiguous memory with zero fill
3208  * @size:    allocation size
3209  *
3210  * Allocate enough pages to cover @size from the page level
3211  * allocator and map them into contiguous kernel virtual space.
3212  * The memory allocated is set to zero.
3213  *
3214  * For tight control over page level allocator and protection flags
3215  * use __vmalloc() instead.
3216  *
3217  * Return: pointer to the allocated memory or %NULL on error
3218  */
3219 void *vzalloc(unsigned long size)
3220 {
3221 	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
3222 				__builtin_return_address(0));
3223 }
3224 EXPORT_SYMBOL(vzalloc);
3225 
3226 /**
3227  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
3228  * @size: allocation size
3229  *
3230  * The resulting memory area is zeroed so it can be mapped to userspace
3231  * without leaking data.
3232  *
3233  * Return: pointer to the allocated memory or %NULL on error
3234  */
3235 void *vmalloc_user(unsigned long size)
3236 {
3237 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3238 				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
3239 				    VM_USERMAP, NUMA_NO_NODE,
3240 				    __builtin_return_address(0));
3241 }
3242 EXPORT_SYMBOL(vmalloc_user);
3243 
3244 /**
3245  * vmalloc_node - allocate memory on a specific node
3246  * @size:	  allocation size
3247  * @node:	  numa node
3248  *
3249  * Allocate enough pages to cover @size from the page level
3250  * allocator and map them into contiguous kernel virtual space.
3251  *
3252  * For tight control over page level allocator and protection flags
3253  * use __vmalloc() instead.
3254  *
3255  * Return: pointer to the allocated memory or %NULL on error
3256  */
3257 void *vmalloc_node(unsigned long size, int node)
3258 {
3259 	return __vmalloc_node(size, 1, GFP_KERNEL, node,
3260 			__builtin_return_address(0));
3261 }
3262 EXPORT_SYMBOL(vmalloc_node);
3263 
3264 /**
3265  * vzalloc_node - allocate memory on a specific node with zero fill
3266  * @size:	allocation size
3267  * @node:	numa node
3268  *
3269  * Allocate enough pages to cover @size from the page level
3270  * allocator and map them into contiguous kernel virtual space.
3271  * The memory allocated is set to zero.
3272  *
3273  * Return: pointer to the allocated memory or %NULL on error
3274  */
3275 void *vzalloc_node(unsigned long size, int node)
3276 {
3277 	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
3278 				__builtin_return_address(0));
3279 }
3280 EXPORT_SYMBOL(vzalloc_node);
3281 
3282 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
3283 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3284 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
3285 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
3286 #else
3287 /*
3288  * 64-bit systems should always have either DMA or DMA32 zones. For others,
3289  * GFP_DMA32 should do the right thing and use the normal zone.
3290  */
3291 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3292 #endif
3293 
3294 /**
3295  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
3296  * @size:	allocation size
3297  *
3298  * Allocate enough 32bit PA addressable pages to cover @size from the
3299  * page level allocator and map them into contiguous kernel virtual space.
3300  *
3301  * Return: pointer to the allocated memory or %NULL on error
3302  */
3303 void *vmalloc_32(unsigned long size)
3304 {
3305 	return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
3306 			__builtin_return_address(0));
3307 }
3308 EXPORT_SYMBOL(vmalloc_32);
3309 
3310 /**
3311  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
3312  * @size:	     allocation size
3313  *
3314  * The resulting memory area is 32bit addressable and zeroed so it can be
3315  * mapped to userspace without leaking data.
3316  *
3317  * Return: pointer to the allocated memory or %NULL on error
3318  */
3319 void *vmalloc_32_user(unsigned long size)
3320 {
3321 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3322 				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
3323 				    VM_USERMAP, NUMA_NO_NODE,
3324 				    __builtin_return_address(0));
3325 }
3326 EXPORT_SYMBOL(vmalloc_32_user);
3327 
3328 /*
3329  * Small helper routine: copy contents from addr to buf.
3330  * If a page is not present, fill with zeroes.
3331  */
3332 
3333 static int aligned_vread(char *buf, char *addr, unsigned long count)
3334 {
3335 	struct page *p;
3336 	int copied = 0;
3337 
3338 	while (count) {
3339 		unsigned long offset, length;
3340 
3341 		offset = offset_in_page(addr);
3342 		length = PAGE_SIZE - offset;
3343 		if (length > count)
3344 			length = count;
3345 		p = vmalloc_to_page(addr);
3346 		/*
3347 		 * Safe access to this _mapped_ area would require a
3348 		 * lock, but taking a lock here would add overhead to
3349 		 * every vmalloc()/vfree() call just for this rarely
3350 		 * used _debug_ interface. Instead, use kmap_atomic()
3351 		 * and accept a small overhead in this access function.
3352 		 */
3353 		if (p) {
3354 			/* We can expect USER0 is not used -- see vread() */
3355 			void *map = kmap_atomic(p);
3356 			memcpy(buf, map + offset, length);
3357 			kunmap_atomic(map);
3358 		} else
3359 			memset(buf, 0, length);
3360 
3361 		addr += length;
3362 		buf += length;
3363 		copied += length;
3364 		count -= length;
3365 	}
3366 	return copied;
3367 }
3368 
3369 /**
3370  * vread() - read vmalloc area in a safe way.
3371  * @buf:     buffer for reading data
3372  * @addr:    vm address.
3373  * @count:   number of bytes to be read.
3374  *
3375  * This function checks that addr is a valid vmalloc'ed area and copies
3376  * data from that area to the given buffer. If the given memory range of
3377  * [addr...addr+count) includes some valid address, data is copied to
3378  * the proper area of @buf. Memory holes are zero-filled, and an IOREMAP
3379  * area is treated as a memory hole: no copy is done there.
3380  *
3381  * If [addr...addr+count) does not intersect any live vm_struct area,
3382  * returns 0. @buf should be a kernel buffer.
3383  *
3384  * Note: in usual operation, vread() is never necessary because the
3385  * caller should know that the vmalloc() area is valid and can use
3386  * memcpy(). This is for routines, such as /proc/kcore, which must
3387  * access the vmalloc area without any other information.
3388  *
3389  * Return: number of bytes for which addr and buf should be increased
3390  * (same number as @count) or %0 if [addr...addr+count) doesn't
3391  * include any intersection with valid vmalloc area
3392  */
3393 long vread(char *buf, char *addr, unsigned long count)
3394 {
3395 	struct vmap_area *va;
3396 	struct vm_struct *vm;
3397 	char *vaddr, *buf_start = buf;
3398 	unsigned long buflen = count;
3399 	unsigned long n;
3400 
3401 	addr = kasan_reset_tag(addr);
3402 
3403 	/* Don't allow overflow */
3404 	if ((unsigned long) addr + count < count)
3405 		count = -(unsigned long) addr;
3406 
3407 	spin_lock(&vmap_area_lock);
3408 	va = find_vmap_area_exceed_addr((unsigned long)addr);
3409 	if (!va)
3410 		goto finished;
3411 
3412 	/* no intersects with alive vmap_area */
3413 	if ((unsigned long)addr + count <= va->va_start)
3414 		goto finished;
3415 
3416 	list_for_each_entry_from(va, &vmap_area_list, list) {
3417 		if (!count)
3418 			break;
3419 
3420 		if (!va->vm)
3421 			continue;
3422 
3423 		vm = va->vm;
3424 		vaddr = (char *) vm->addr;
3425 		if (addr >= vaddr + get_vm_area_size(vm))
3426 			continue;
3427 		while (addr < vaddr) {
3428 			if (count == 0)
3429 				goto finished;
3430 			*buf = '\0';
3431 			buf++;
3432 			addr++;
3433 			count--;
3434 		}
3435 		n = vaddr + get_vm_area_size(vm) - addr;
3436 		if (n > count)
3437 			n = count;
3438 		if (!(vm->flags & VM_IOREMAP))
3439 			aligned_vread(buf, addr, n);
3440 		else /* IOREMAP area is treated as memory hole */
3441 			memset(buf, 0, n);
3442 		buf += n;
3443 		addr += n;
3444 		count -= n;
3445 	}
3446 finished:
3447 	spin_unlock(&vmap_area_lock);
3448 
3449 	if (buf == buf_start)
3450 		return 0;
3451 	/* zero-fill memory holes */
3452 	if (buf != buf_start + buflen)
3453 		memset(buf, 0, buflen - (buf - buf_start));
3454 
3455 	return buflen;
3456 }
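
/*
 * Illustrative usage (a sketch, not part of this file): a /proc/kcore
 * style reader that must dump an arbitrary vmalloc range copies through
 * a bounce buffer, since vread() requires a kernel buffer and fills
 * holes with zeroes. A zero return means no live vmalloc area was hit.
 * "kbuf", "start" and "len" are hypothetical names.
 *
 *	char *kbuf = kmalloc(len, GFP_KERNEL);
 *
 *	if (!kbuf)
 *		return -ENOMEM;
 *	if (!vread(kbuf, (char *)start, len))
 *		memset(kbuf, 0, len);
 *	...
 *	kfree(kbuf);
 */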

/**
 * remap_vmalloc_range_partial - map vmalloc pages to userspace
 * @vma:		vma to cover
 * @uaddr:		target user address to start at
 * @kaddr:		virtual address of vmalloc kernel memory
 * @pgoff:		offset from @kaddr to start at
 * @size:		size of map area
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that @kaddr is a valid vmalloc'ed area,
 * and that it is big enough to cover the range starting at
 * @uaddr in @vma. Returns failure if those criteria aren't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
				void *kaddr, unsigned long pgoff,
				unsigned long size)
{
	struct vm_struct *area;
	unsigned long off;
	unsigned long end_index;

	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
		return -EINVAL;

	size = PAGE_ALIGN(size);

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
		return -EINVAL;

	area = find_vm_area(kaddr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
		return -EINVAL;

	if (check_add_overflow(size, off, &end_index) ||
	    end_index > get_vm_area_size(area))
		return -EINVAL;
	kaddr += off;

	do {
		struct page *page = vmalloc_to_page(kaddr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		kaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size > 0);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma:		vma to cover (map full range of vma)
 * @addr:		vmalloc memory
 * @pgoff:		number of pages into addr before first page to map
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Returns failure if those
 * criteria aren't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   addr, pgoff,
					   vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);
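
/*
 * Illustrative usage (a sketch, not part of this file): a typical
 * ->mmap() file operation backed by a vmalloc buffer. "my_dev" and
 * "dev->vbuf" are hypothetical; the buffer is assumed to have been
 * allocated with vmalloc_user() or vmalloc_32_user() so the VM_USERMAP
 * check in remap_vmalloc_range_partial() passes.
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		return remap_vmalloc_range(vma, dev->vbuf, vma->vm_pgoff);
 *	}
 */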

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;

	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
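
/*
 * Illustrative usage (a sketch, not part of this file): free_vm_area()
 * is the teardown half for callers that reserve bare kernel virtual
 * address space with get_vm_area() and populate the mapping themselves.
 * "len" is a hypothetical name.
 *
 *	struct vm_struct *area = get_vm_area(len, VM_IOREMAP);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	... map something at area->addr ...
 *	free_vm_area(area);
 */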

#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return rb_entry_safe(n, struct vmap_area, rb_node);
}

/**
 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
 * @addr: target address
 *
 * Returns: the vmap_area that encloses @addr if one is found.
 *   Otherwise the highest vmap_area below @addr is returned,
 *   i.e. one with va->va_start < addr && va->va_end < addr,
 *   or NULL if there is no area below @addr at all.
 */
static struct vmap_area *
pvm_find_va_enclose_addr(unsigned long addr)
{
	struct vmap_area *va, *tmp;
	struct rb_node *n;

	n = free_vmap_area_root.rb_node;
	va = NULL;

	while (n) {
		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_start <= addr) {
			va = tmp;
			if (tmp->va_end >= addr)
				break;

			n = n->rb_right;
		} else {
			n = n->rb_left;
		}
	}

	return va;
}

/**
 * pvm_determine_end_from_reverse - find the highest aligned address
 * of a free block below VMALLOC_END
 * @va:
 *   in - the VA we start the search from (reverse order);
 *   out - the VA with the highest aligned end address.
 * @align: alignment for the required highest address
 *
 * Returns: determined end address within vmap_area
 */
static unsigned long
pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
{
	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (likely(*va)) {
		list_for_each_entry_from_reverse((*va),
				&free_vmap_area_list, list) {
			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
			if ((*va)->va_start < addr)
				return addr;
		}
	}

	return 0;
}
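
/*
 * Worked example (illustrative): with align = 0x10000 the mask
 * ~(align - 1) is ~0xffff, so a free block whose va_end is 0x12345678
 * yields addr = 0x12340000. The block is usable only if its va_start
 * lies strictly below that rounded-down end, which is exactly the
 * check in the loop above.
 */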

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * The percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas.  This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
 * be scattered pretty far apart, with the distance between two areas
 * easily going up to gigabytes.  To avoid interacting with regular
 * vmallocs, these areas are allocated from the top.
 *
 * Despite its complicated look, this allocator is rather simple. It
 * does everything top-down and scans free blocks from the end looking
 * for a matching base. While scanning, if any of the areas do not fit,
 * the base address is pulled down to fit them. Scanning is repeated
 * until all the areas fit, and then all necessary data structures are
 * inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *va;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, size, end, last_end, orig_start, orig_end;
	bool purged = false;
	enum fit_type type;

	/* verify parameters and allocate data structures */
	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = area + 1; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			BUG_ON(start2 < end && start < end2);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
	if (!vas || !vms)
		goto err_free2;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&free_vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	va = pvm_find_va_enclose_addr(vmalloc_end);
	base = pvm_determine_end_from_reverse(&va, align) - end;

	while (true) {
		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end)
			goto overflow;

		/*
		 * Fitting base has not been found.
		 */
		if (va == NULL)
			goto overflow;

		/*
		 * If required width exceeds current VA block, move
		 * base downwards and then recheck.
		 */
		if (base + end > va->va_end) {
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If this VA does not fit, move base downwards and recheck.
		 */
		if (base + start < va->va_start) {
			va = node_to_va(rb_prev(&va->rb_node));
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one.  If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;

		start = offsets[area];
		end = start + sizes[area];
		va = pvm_find_va_enclose_addr(base + end);
	}

	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		int ret;

		start = base + offsets[area];
		size = sizes[area];

		va = pvm_find_va_enclose_addr(start);
		if (WARN_ON_ONCE(va == NULL))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		type = classify_va_fit_type(va, start, size);
		if (WARN_ON_ONCE(type == NOTHING_FIT))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		ret = adjust_va_to_fit_type(va, start, size, type);
		if (unlikely(ret))
			goto recovery;

		/* Allocated area. */
		va = vas[area];
		va->va_start = start;
		va->va_end = start + size;
	}

	spin_unlock(&free_vmap_area_lock);

	/* populate the kasan shadow space */
	for (area = 0; area < nr_vms; area++) {
		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
			goto err_free_shadow;
	}

	/* insert all vm's */
	spin_lock(&vmap_area_lock);
	for (area = 0; area < nr_vms; area++) {
		insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);

		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
				 pcpu_get_vm_areas);
	}
	spin_unlock(&vmap_area_lock);

	/*
	 * Mark allocated areas as accessible. Do it now as a best-effort
	 * approach, as they can be mapped outside of vmalloc code.
	 * With hardware tag-based KASAN, marking is skipped for
	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
	 */
	for (area = 0; area < nr_vms; area++)
		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
				vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);

	kfree(vas);
	return vms;

recovery:
	/*
	 * Remove previously allocated areas. There is no need to
	 * remove these areas from the busy tree, because they are
	 * inserted only on the final step and only when
	 * pcpu_get_vm_areas() succeeds.
	 */
	while (area--) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
				&free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
		vas[area] = NULL;
	}

overflow:
	spin_unlock(&free_vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = true;

		/* Before "retry", check if we recover. */
		for (area = 0; area < nr_vms; area++) {
			if (vas[area])
				continue;

			vas[area] = kmem_cache_zalloc(
				vmap_area_cachep, GFP_KERNEL);
			if (!vas[area])
				goto err_free;
		}

		goto retry;
	}

err_free:
	for (area = 0; area < nr_vms; area++) {
		if (vas[area])
			kmem_cache_free(vmap_area_cachep, vas[area]);

		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;

err_free_shadow:
	spin_lock(&free_vmap_area_lock);
	/*
	 * We release all the vmalloc shadows, even the ones for regions that
	 * hadn't been successfully added. This relies on kasan_release_vmalloc
	 * being able to tolerate this case.
	 */
	for (area = 0; area < nr_vms; area++) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
				&free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
		vas[area] = NULL;
		kfree(vms[area]);
	}
	spin_unlock(&free_vmap_area_lock);
	kfree(vas);
	kfree(vms);
	return NULL;
}

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
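
/*
 * Illustrative usage (a sketch, not part of this file): the percpu
 * allocator creates a chunk by allocating one congruent area per group
 * and frees them as a group. The values below are hypothetical; real
 * callers pass their group offsets, sizes and atom size.
 *
 *	unsigned long offsets[] = { 0, SZ_2M };
 *	size_t sizes[] = { SZ_2M, SZ_2M };
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, 2, SZ_2M);
 *	if (!vms)
 *		return -ENOMEM;
 *	...
 *	pcpu_free_vm_areas(vms, 2);
 */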
#endif	/* CONFIG_SMP */

#ifdef CONFIG_PRINTK
bool vmalloc_dump_obj(void *object)
{
	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
	const void *caller;
	struct vm_struct *vm;
	struct vmap_area *va;
	unsigned long addr;
	unsigned int nr_pages;

	if (!spin_trylock(&vmap_area_lock))
		return false;
	va = __find_vmap_area((unsigned long)objp);
	if (!va) {
		spin_unlock(&vmap_area_lock);
		return false;
	}

	vm = va->vm;
	if (!vm) {
		spin_unlock(&vmap_area_lock);
		return false;
	}
	addr = (unsigned long)vm->addr;
	caller = vm->caller;
	nr_pages = vm->nr_pages;
	spin_unlock(&vmap_area_lock);
	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
		nr_pages, addr, caller);
	return true;
}
#endif

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_purge_lock)
	__acquires(&vmap_area_lock)
{
	mutex_lock(&vmap_purge_lock);
	spin_lock(&vmap_area_lock);

	return seq_list_start(&vmap_area_list, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &vmap_area_list, pos);
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
	__releases(&vmap_purge_lock)
{
	spin_unlock(&vmap_area_lock);
	mutex_unlock(&vmap_purge_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		if (v->flags & VM_UNINITIALIZED)
			return;
		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static void show_purge_info(struct seq_file *m)
{
	struct vmap_area *va;

	spin_lock(&purge_vmap_area_lock);
	list_for_each_entry(va, &purge_vmap_area_list, list) {
		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);
	}
	spin_unlock(&purge_vmap_area_lock);
}

static int s_show(struct seq_file *m, void *p)
{
	struct vmap_area *va;
	struct vm_struct *v;

	va = list_entry(p, struct vmap_area, list);

	/*
	 * s_show can race with remove_vm_area(): a NULL va->vm means
	 * either that the vmap area is being torn down or that it is a
	 * vm_map_ram() allocation, which never has a vm_struct.
	 */
	if (!va->vm) {
		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);

		return 0;
	}

	v = va->vm;

	seq_printf(m, "0x%pK-0x%pK %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%pa", &v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_puts(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_puts(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_puts(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_puts(m, " user");

	if (v->flags & VM_DMA_COHERENT)
		seq_puts(m, " dma-coherent");

	if (is_vmalloc_addr(v->pages))
		seq_puts(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');

	/*
	 * As a final step, dump "unpurged" areas.
	 */
	if (list_is_last(&va->list, &vmap_area_list))
		show_purge_info(m);

	return 0;
}
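
/*
 * Illustrative /proc/vmallocinfo output as assembled by s_show() above
 * (addresses and callers are made up; %pK may hash or zero the
 * addresses depending on kptr_restrict):
 *
 *	0xffffc90000000000-0xffffc90000005000   20480 start_kernel+0x121/0x19c pages=4 vmalloc N0=4
 *	0xffffc90000007000-0xffffc90000009000    8192 vm_map_ram
 *	0xffffc9000000b000-0xffffc9000000e000   12288 unpurged vm_area
 */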

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int __init proc_vmalloc_init(void)
{
	if (IS_ENABLED(CONFIG_NUMA))
		proc_create_seq_private("vmallocinfo", 0400, NULL,
				&vmalloc_op,
				nr_node_ids * sizeof(unsigned int), NULL);
	else
		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
	return 0;
}
module_init(proc_vmalloc_init);

#endif