1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Copyright (C) 1993  Linus Torvalds
4  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
5  *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
6  *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
7  *  Numa awareness, Christoph Lameter, SGI, June 2005
8  *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
9  */
10 
11 #include <linux/vmalloc.h>
12 #include <linux/mm.h>
13 #include <linux/module.h>
14 #include <linux/highmem.h>
15 #include <linux/sched/signal.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/interrupt.h>
19 #include <linux/proc_fs.h>
20 #include <linux/seq_file.h>
21 #include <linux/set_memory.h>
22 #include <linux/debugobjects.h>
23 #include <linux/kallsyms.h>
24 #include <linux/list.h>
25 #include <linux/notifier.h>
26 #include <linux/rbtree.h>
27 #include <linux/xarray.h>
28 #include <linux/io.h>
29 #include <linux/rcupdate.h>
30 #include <linux/pfn.h>
31 #include <linux/kmemleak.h>
32 #include <linux/atomic.h>
33 #include <linux/compiler.h>
34 #include <linux/llist.h>
35 #include <linux/bitops.h>
36 #include <linux/rbtree_augmented.h>
37 #include <linux/overflow.h>
38 #include <linux/pgtable.h>
39 #include <linux/uaccess.h>
40 #include <linux/hugetlb.h>
41 #include <linux/io.h>
42 #include <asm/tlbflush.h>
43 #include <asm/shmparam.h>
44 
45 #include "internal.h"
46 #include "pgalloc-track.h"
47 
48 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
49 static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;
50 
51 static int __init set_nohugeiomap(char *str)
52 {
53 	ioremap_max_page_shift = PAGE_SHIFT;
54 	return 0;
55 }
56 early_param("nohugeiomap", set_nohugeiomap);
57 #else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
58 static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
59 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
60 
61 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
62 static bool __ro_after_init vmap_allow_huge = true;
63 
64 static int __init set_nohugevmalloc(char *str)
65 {
66 	vmap_allow_huge = false;
67 	return 0;
68 }
69 early_param("nohugevmalloc", set_nohugevmalloc);
70 #else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
71 static const bool vmap_allow_huge = false;
72 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
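
/*
 * Note: both of the knobs above are early boot parameters. Passing
 * "nohugeiomap" limits ioremap mappings to base pages, and passing
 * "nohugevmalloc" disables huge-page backing for vmalloc allocations
 * even when the architecture supports it.
 */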
73 
74 bool is_vmalloc_addr(const void *x)
75 {
76 	unsigned long addr = (unsigned long)kasan_reset_tag(x);
77 
78 	return addr >= VMALLOC_START && addr < VMALLOC_END;
79 }
80 EXPORT_SYMBOL(is_vmalloc_addr);
81 
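/*
 * Deferred vfree machinery: when vfree() is called in a context where the
 * unmap cannot be done directly (e.g. from interrupt context), the pointer
 * is pushed onto this per-CPU lockless list and the actual __vunmap() is
 * performed later by the workqueue handler free_work() below.
 */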
82 struct vfree_deferred {
83 	struct llist_head list;
84 	struct work_struct wq;
85 };
86 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
87 
88 static void __vunmap(const void *, int);
89 
90 static void free_work(struct work_struct *w)
91 {
92 	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
93 	struct llist_node *t, *llnode;
94 
95 	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
96 		__vunmap((void *)llnode, 1);
97 }
98 
99 /*** Page table manipulation functions ***/
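/*
 * The vmap_*_range() helpers below walk the kernel (init_mm) page tables
 * top-down, allocating intermediate levels as needed and installing
 * entries for a physically contiguous range. Where the architecture,
 * alignment and max_page_shift permit, a huge mapping is installed at the
 * PMD/PUD/P4D level instead of individual PTEs.
 */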
100 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
101 			phys_addr_t phys_addr, pgprot_t prot,
102 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
103 {
104 	pte_t *pte;
105 	u64 pfn;
106 	unsigned long size = PAGE_SIZE;
107 
108 	pfn = phys_addr >> PAGE_SHIFT;
109 	pte = pte_alloc_kernel_track(pmd, addr, mask);
110 	if (!pte)
111 		return -ENOMEM;
112 	do {
113 		BUG_ON(!pte_none(*pte));
114 
115 #ifdef CONFIG_HUGETLB_PAGE
116 		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
117 		if (size != PAGE_SIZE) {
118 			pte_t entry = pfn_pte(pfn, prot);
119 
120 			entry = pte_mkhuge(entry);
121 			entry = arch_make_huge_pte(entry, ilog2(size), 0);
122 			set_huge_pte_at(&init_mm, addr, pte, entry);
123 			pfn += PFN_DOWN(size);
124 			continue;
125 		}
126 #endif
127 		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
128 		pfn++;
129 	} while (pte += PFN_DOWN(size), addr += size, addr != end);
130 	*mask |= PGTBL_PTE_MODIFIED;
131 	return 0;
132 }
133 
134 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
135 			phys_addr_t phys_addr, pgprot_t prot,
136 			unsigned int max_page_shift)
137 {
138 	if (max_page_shift < PMD_SHIFT)
139 		return 0;
140 
141 	if (!arch_vmap_pmd_supported(prot))
142 		return 0;
143 
144 	if ((end - addr) != PMD_SIZE)
145 		return 0;
146 
147 	if (!IS_ALIGNED(addr, PMD_SIZE))
148 		return 0;
149 
150 	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
151 		return 0;
152 
153 	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
154 		return 0;
155 
156 	return pmd_set_huge(pmd, phys_addr, prot);
157 }
158 
159 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
160 			phys_addr_t phys_addr, pgprot_t prot,
161 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
162 {
163 	pmd_t *pmd;
164 	unsigned long next;
165 
166 	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
167 	if (!pmd)
168 		return -ENOMEM;
169 	do {
170 		next = pmd_addr_end(addr, end);
171 
172 		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
173 					max_page_shift)) {
174 			*mask |= PGTBL_PMD_MODIFIED;
175 			continue;
176 		}
177 
178 		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
179 			return -ENOMEM;
180 	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
181 	return 0;
182 }
183 
184 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
185 			phys_addr_t phys_addr, pgprot_t prot,
186 			unsigned int max_page_shift)
187 {
188 	if (max_page_shift < PUD_SHIFT)
189 		return 0;
190 
191 	if (!arch_vmap_pud_supported(prot))
192 		return 0;
193 
194 	if ((end - addr) != PUD_SIZE)
195 		return 0;
196 
197 	if (!IS_ALIGNED(addr, PUD_SIZE))
198 		return 0;
199 
200 	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
201 		return 0;
202 
203 	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
204 		return 0;
205 
206 	return pud_set_huge(pud, phys_addr, prot);
207 }
208 
209 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
210 			phys_addr_t phys_addr, pgprot_t prot,
211 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
212 {
213 	pud_t *pud;
214 	unsigned long next;
215 
216 	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
217 	if (!pud)
218 		return -ENOMEM;
219 	do {
220 		next = pud_addr_end(addr, end);
221 
222 		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
223 					max_page_shift)) {
224 			*mask |= PGTBL_PUD_MODIFIED;
225 			continue;
226 		}
227 
228 		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
229 					max_page_shift, mask))
230 			return -ENOMEM;
231 	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
232 	return 0;
233 }
234 
235 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
236 			phys_addr_t phys_addr, pgprot_t prot,
237 			unsigned int max_page_shift)
238 {
239 	if (max_page_shift < P4D_SHIFT)
240 		return 0;
241 
242 	if (!arch_vmap_p4d_supported(prot))
243 		return 0;
244 
245 	if ((end - addr) != P4D_SIZE)
246 		return 0;
247 
248 	if (!IS_ALIGNED(addr, P4D_SIZE))
249 		return 0;
250 
251 	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
252 		return 0;
253 
254 	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
255 		return 0;
256 
257 	return p4d_set_huge(p4d, phys_addr, prot);
258 }
259 
260 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
261 			phys_addr_t phys_addr, pgprot_t prot,
262 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
263 {
264 	p4d_t *p4d;
265 	unsigned long next;
266 
267 	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
268 	if (!p4d)
269 		return -ENOMEM;
270 	do {
271 		next = p4d_addr_end(addr, end);
272 
273 		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
274 					max_page_shift)) {
275 			*mask |= PGTBL_P4D_MODIFIED;
276 			continue;
277 		}
278 
279 		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
280 					max_page_shift, mask))
281 			return -ENOMEM;
282 	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
283 	return 0;
284 }
285 
286 static int vmap_range_noflush(unsigned long addr, unsigned long end,
287 			phys_addr_t phys_addr, pgprot_t prot,
288 			unsigned int max_page_shift)
289 {
290 	pgd_t *pgd;
291 	unsigned long start;
292 	unsigned long next;
293 	int err;
294 	pgtbl_mod_mask mask = 0;
295 
296 	might_sleep();
297 	BUG_ON(addr >= end);
298 
299 	start = addr;
300 	pgd = pgd_offset_k(addr);
301 	do {
302 		next = pgd_addr_end(addr, end);
303 		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
304 					max_page_shift, &mask);
305 		if (err)
306 			break;
307 	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
308 
309 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
310 		arch_sync_kernel_mappings(start, end);
311 
312 	return err;
313 }
314 
315 int ioremap_page_range(unsigned long addr, unsigned long end,
316 		phys_addr_t phys_addr, pgprot_t prot)
317 {
318 	int err;
319 
320 	prot = pgprot_nx(prot);
321 	err = vmap_range_noflush(addr, end, phys_addr, prot,
322 				 ioremap_max_page_shift);
323 	flush_cache_vmap(addr, end);
324 
325 	if (IS_ENABLED(CONFIG_ARCH_HAS_IOREMAP_PHYS_HOOKS) && !err)
326 		ioremap_phys_range_hook(phys_addr, end - addr, prot);
327 
328 	return err;
329 }
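
/*
 * Illustrative sketch (not part of this file): a generic ioremap()
 * implementation typically reserves a VM area first and then maps the
 * MMIO range into it, roughly along these lines:
 *
 *	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 *	if (area)
 *		err = ioremap_page_range((unsigned long)area->addr,
 *					 (unsigned long)area->addr + size,
 *					 phys_addr, prot);
 */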
330 
331 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
332 			     pgtbl_mod_mask *mask)
333 {
334 	pte_t *pte;
335 
336 	pte = pte_offset_kernel(pmd, addr);
337 	do {
338 		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
339 		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
340 	} while (pte++, addr += PAGE_SIZE, addr != end);
341 	*mask |= PGTBL_PTE_MODIFIED;
342 }
343 
344 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
345 			     pgtbl_mod_mask *mask)
346 {
347 	pmd_t *pmd;
348 	unsigned long next;
349 	int cleared;
350 
351 	pmd = pmd_offset(pud, addr);
352 	do {
353 		next = pmd_addr_end(addr, end);
354 
355 		cleared = pmd_clear_huge(pmd);
356 		if (cleared || pmd_bad(*pmd))
357 			*mask |= PGTBL_PMD_MODIFIED;
358 
359 		if (cleared)
360 			continue;
361 		if (pmd_none_or_clear_bad(pmd))
362 			continue;
363 		vunmap_pte_range(pmd, addr, next, mask);
364 
365 		cond_resched();
366 	} while (pmd++, addr = next, addr != end);
367 }
368 
369 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
370 			     pgtbl_mod_mask *mask)
371 {
372 	pud_t *pud;
373 	unsigned long next;
374 	int cleared;
375 
376 	pud = pud_offset(p4d, addr);
377 	do {
378 		next = pud_addr_end(addr, end);
379 
380 		cleared = pud_clear_huge(pud);
381 		if (cleared || pud_bad(*pud))
382 			*mask |= PGTBL_PUD_MODIFIED;
383 
384 		if (cleared)
385 			continue;
386 		if (pud_none_or_clear_bad(pud))
387 			continue;
388 		vunmap_pmd_range(pud, addr, next, mask);
389 	} while (pud++, addr = next, addr != end);
390 }
391 
392 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
393 			     pgtbl_mod_mask *mask)
394 {
395 	p4d_t *p4d;
396 	unsigned long next;
397 	int cleared;
398 
399 	p4d = p4d_offset(pgd, addr);
400 	do {
401 		next = p4d_addr_end(addr, end);
402 
403 		cleared = p4d_clear_huge(p4d);
404 		if (cleared || p4d_bad(*p4d))
405 			*mask |= PGTBL_P4D_MODIFIED;
406 
407 		if (cleared)
408 			continue;
409 		if (p4d_none_or_clear_bad(p4d))
410 			continue;
411 		vunmap_pud_range(p4d, addr, next, mask);
412 	} while (p4d++, addr = next, addr != end);
413 }
414 
415 /*
416  * vunmap_range_noflush is similar to vunmap_range, but does not
417  * flush caches or TLBs.
418  *
419  * The caller is responsible for calling flush_cache_vmap() before calling
420  * this function, and flush_tlb_kernel_range after it has returned
421  * successfully (and before the addresses are expected to cause a page fault
422  * or be re-mapped for something else, if TLB flushes are being delayed or
423  * coalesced).
424  *
425  * This is an internal function only. Do not use outside mm/.
426  */
427 void vunmap_range_noflush(unsigned long start, unsigned long end)
428 {
429 	unsigned long next;
430 	pgd_t *pgd;
431 	unsigned long addr = start;
432 	pgtbl_mod_mask mask = 0;
433 
434 	BUG_ON(addr >= end);
435 	pgd = pgd_offset_k(addr);
436 	do {
437 		next = pgd_addr_end(addr, end);
438 		if (pgd_bad(*pgd))
439 			mask |= PGTBL_PGD_MODIFIED;
440 		if (pgd_none_or_clear_bad(pgd))
441 			continue;
442 		vunmap_p4d_range(pgd, addr, next, &mask);
443 	} while (pgd++, addr = next, addr != end);
444 
445 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
446 		arch_sync_kernel_mappings(start, end);
447 }
448 
449 /**
450  * vunmap_range - unmap kernel virtual addresses
451  * @addr: start of the VM area to unmap
452  * @end: end of the VM area to unmap (non-inclusive)
453  *
454  * Clears any present PTEs in the virtual address range, flushes TLBs and
455  * caches. Any subsequent access to the address before it has been re-mapped
456  * is a kernel bug.
457  */
458 void vunmap_range(unsigned long addr, unsigned long end)
459 {
460 	flush_cache_vunmap(addr, end);
461 	vunmap_range_noflush(addr, end);
462 	flush_tlb_kernel_range(addr, end);
463 }
464 
465 static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
466 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
467 		pgtbl_mod_mask *mask)
468 {
469 	pte_t *pte;
470 
471 	/*
472 	 * nr is a running index into the array which helps higher level
473 	 * callers keep track of where we're up to.
474 	 */
475 
476 	pte = pte_alloc_kernel_track(pmd, addr, mask);
477 	if (!pte)
478 		return -ENOMEM;
479 	do {
480 		struct page *page = pages[*nr];
481 
482 		if (WARN_ON(!pte_none(*pte)))
483 			return -EBUSY;
484 		if (WARN_ON(!page))
485 			return -ENOMEM;
486 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
487 		(*nr)++;
488 	} while (pte++, addr += PAGE_SIZE, addr != end);
489 	*mask |= PGTBL_PTE_MODIFIED;
490 	return 0;
491 }
492 
493 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
494 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
495 		pgtbl_mod_mask *mask)
496 {
497 	pmd_t *pmd;
498 	unsigned long next;
499 
500 	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
501 	if (!pmd)
502 		return -ENOMEM;
503 	do {
504 		next = pmd_addr_end(addr, end);
505 		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
506 			return -ENOMEM;
507 	} while (pmd++, addr = next, addr != end);
508 	return 0;
509 }
510 
511 static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
512 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
513 		pgtbl_mod_mask *mask)
514 {
515 	pud_t *pud;
516 	unsigned long next;
517 
518 	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
519 	if (!pud)
520 		return -ENOMEM;
521 	do {
522 		next = pud_addr_end(addr, end);
523 		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
524 			return -ENOMEM;
525 	} while (pud++, addr = next, addr != end);
526 	return 0;
527 }
528 
529 static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
530 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
531 		pgtbl_mod_mask *mask)
532 {
533 	p4d_t *p4d;
534 	unsigned long next;
535 
536 	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
537 	if (!p4d)
538 		return -ENOMEM;
539 	do {
540 		next = p4d_addr_end(addr, end);
541 		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
542 			return -ENOMEM;
543 	} while (p4d++, addr = next, addr != end);
544 	return 0;
545 }
546 
547 static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
548 		pgprot_t prot, struct page **pages)
549 {
550 	unsigned long start = addr;
551 	pgd_t *pgd;
552 	unsigned long next;
553 	int err = 0;
554 	int nr = 0;
555 	pgtbl_mod_mask mask = 0;
556 
557 	BUG_ON(addr >= end);
558 	pgd = pgd_offset_k(addr);
559 	do {
560 		next = pgd_addr_end(addr, end);
561 		if (pgd_bad(*pgd))
562 			mask |= PGTBL_PGD_MODIFIED;
563 		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
564 		if (err)
565 			return err;
566 	} while (pgd++, addr = next, addr != end);
567 
568 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
569 		arch_sync_kernel_mappings(start, end);
570 
571 	return 0;
572 }
573 
574 /*
575  * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
576  * flush caches.
577  *
578  * The caller is responsible for calling flush_cache_vmap() after this
579  * function returns successfully and before the addresses are accessed.
580  *
581  * This is an internal function only. Do not use outside mm/.
582  */
583 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
584 		pgprot_t prot, struct page **pages, unsigned int page_shift)
585 {
586 	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
587 
588 	WARN_ON(page_shift < PAGE_SHIFT);
589 
590 	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
591 			page_shift == PAGE_SHIFT)
592 		return vmap_small_pages_range_noflush(addr, end, prot, pages);
593 
594 	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
595 		int err;
596 
597 		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
598 					__pa(page_address(pages[i])), prot,
599 					page_shift);
600 		if (err)
601 			return err;
602 
603 		addr += 1UL << page_shift;
604 	}
605 
606 	return 0;
607 }
608 
609 /**
610  * vmap_pages_range - map pages to a kernel virtual address
611  * @addr: start of the VM area to map
612  * @end: end of the VM area to map (non-inclusive)
613  * @prot: page protection flags to use
614  * @pages: pages to map (always PAGE_SIZE pages)
615  * @page_shift: maximum shift that the pages may be mapped with, @pages must
616  * be aligned and contiguous up to at least this shift.
617  *
618  * RETURNS:
619  * 0 on success, -errno on failure.
620  */
621 static int vmap_pages_range(unsigned long addr, unsigned long end,
622 		pgprot_t prot, struct page **pages, unsigned int page_shift)
623 {
624 	int err;
625 
626 	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
627 	flush_cache_vmap(addr, end);
628 	return err;
629 }
630 
631 int is_vmalloc_or_module_addr(const void *x)
632 {
633 	/*
634 	 * ARM, x86-64 and sparc64 put modules in a special place,
635 	 * and fall back on vmalloc() if that fails. Others
636 	 * just put them in the vmalloc space.
637 	 */
638 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
639 	unsigned long addr = (unsigned long)kasan_reset_tag(x);
640 	if (addr >= MODULES_VADDR && addr < MODULES_END)
641 		return 1;
642 #endif
643 	return is_vmalloc_addr(x);
644 }
645 
646 /*
647  * Walk a vmap address to the struct page it maps. Huge vmap mappings will
648  * return the tail page that corresponds to the base page address, which
649  * matches small vmap mappings.
650  */
651 struct page *vmalloc_to_page(const void *vmalloc_addr)
652 {
653 	unsigned long addr = (unsigned long) vmalloc_addr;
654 	struct page *page = NULL;
655 	pgd_t *pgd = pgd_offset_k(addr);
656 	p4d_t *p4d;
657 	pud_t *pud;
658 	pmd_t *pmd;
659 	pte_t *ptep, pte;
660 
661 	/*
662 	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
663 	 * architectures that do not vmalloc module space
664 	 */
665 	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
666 
667 	if (pgd_none(*pgd))
668 		return NULL;
669 	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
670 		return NULL; /* XXX: no allowance for huge pgd */
671 	if (WARN_ON_ONCE(pgd_bad(*pgd)))
672 		return NULL;
673 
674 	p4d = p4d_offset(pgd, addr);
675 	if (p4d_none(*p4d))
676 		return NULL;
677 	if (p4d_leaf(*p4d))
678 		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
679 	if (WARN_ON_ONCE(p4d_bad(*p4d)))
680 		return NULL;
681 
682 	pud = pud_offset(p4d, addr);
683 	if (pud_none(*pud))
684 		return NULL;
685 	if (pud_leaf(*pud))
686 		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
687 	if (WARN_ON_ONCE(pud_bad(*pud)))
688 		return NULL;
689 
690 	pmd = pmd_offset(pud, addr);
691 	if (pmd_none(*pmd))
692 		return NULL;
693 	if (pmd_leaf(*pmd))
694 		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
695 	if (WARN_ON_ONCE(pmd_bad(*pmd)))
696 		return NULL;
697 
698 	ptep = pte_offset_map(pmd, addr);
699 	pte = *ptep;
700 	if (pte_present(pte))
701 		page = pte_page(pte);
702 	pte_unmap(ptep);
703 
704 	return page;
705 }
706 EXPORT_SYMBOL(vmalloc_to_page);
707 
708 /*
709  * Map a vmalloc()-space virtual address to the physical page frame number.
710  */
711 unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
712 {
713 	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
714 }
715 EXPORT_SYMBOL(vmalloc_to_pfn);
716 
717 
718 /*** Global kva allocator ***/
719 
720 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
721 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
722 
723 
724 static DEFINE_SPINLOCK(vmap_area_lock);
725 static DEFINE_SPINLOCK(free_vmap_area_lock);
726 /* Export for kexec only */
727 LIST_HEAD(vmap_area_list);
728 static struct rb_root vmap_area_root = RB_ROOT;
729 static bool vmap_initialized __read_mostly;
730 
731 static struct rb_root purge_vmap_area_root = RB_ROOT;
732 static LIST_HEAD(purge_vmap_area_list);
733 static DEFINE_SPINLOCK(purge_vmap_area_lock);
734 
735 /*
736  * This kmem_cache is used for vmap_area objects. Instead of
737  * allocating from slab we reuse an object from this cache to
738  * make things faster, especially in the "no edge" splitting of
739  * a free block.
740  */
741 static struct kmem_cache *vmap_area_cachep;
742 
743 /*
744  * This linked list is used together with free_vmap_area_root.
745  * It gives O(1) access to prev/next to perform fast coalescing.
746  */
747 static LIST_HEAD(free_vmap_area_list);
748 
749 /*
750  * This augmented red-black tree represents the free vmap space.
751  * All vmap_area objects in this tree are sorted by va->va_start
752  * address. It is used for allocation and merging when a vmap
753  * object is released.
754  *
755  * Each vmap_area node contains the maximum available free block
756  * size of its sub-tree, right or left. Therefore it is possible to
757  * find the lowest match of a free area.
758  */
759 static struct rb_root free_vmap_area_root = RB_ROOT;
760 
761 /*
762  * Preload a CPU with one object for the "no edge" split case. The
763  * aim is to get rid of allocations from the atomic context and thus
764  * to use more permissive allocation masks.
765  */
766 static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
767 
768 static __always_inline unsigned long
769 va_size(struct vmap_area *va)
770 {
771 	return (va->va_end - va->va_start);
772 }
773 
774 static __always_inline unsigned long
775 get_subtree_max_size(struct rb_node *node)
776 {
777 	struct vmap_area *va;
778 
779 	va = rb_entry_safe(node, struct vmap_area, rb_node);
780 	return va ? va->subtree_max_size : 0;
781 }
782 
783 /*
784  * Gets called when remove the node and rotate.
785  */
786 static __always_inline unsigned long
787 compute_subtree_max_size(struct vmap_area *va)
788 {
789 	return max3(va_size(va),
790 		get_subtree_max_size(va->rb_node.rb_left),
791 		get_subtree_max_size(va->rb_node.rb_right));
792 }
793 
794 RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
795 	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
796 
797 static void purge_vmap_area_lazy(void);
798 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
799 static unsigned long lazy_max_pages(void);
800 
801 static atomic_long_t nr_vmalloc_pages;
802 
803 unsigned long vmalloc_nr_pages(void)
804 {
805 	return atomic_long_read(&nr_vmalloc_pages);
806 }
807 EXPORT_SYMBOL_GPL(vmalloc_nr_pages);
808 
809 static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
810 {
811 	struct vmap_area *va = NULL;
812 	struct rb_node *n = vmap_area_root.rb_node;
813 
814 	addr = (unsigned long)kasan_reset_tag((void *)addr);
815 
816 	while (n) {
817 		struct vmap_area *tmp;
818 
819 		tmp = rb_entry(n, struct vmap_area, rb_node);
820 		if (tmp->va_end > addr) {
821 			va = tmp;
822 			if (tmp->va_start <= addr)
823 				break;
824 
825 			n = n->rb_left;
826 		} else
827 			n = n->rb_right;
828 	}
829 
830 	return va;
831 }
832 
833 static struct vmap_area *__find_vmap_area(unsigned long addr)
834 {
835 	struct rb_node *n = vmap_area_root.rb_node;
836 
837 	addr = (unsigned long)kasan_reset_tag((void *)addr);
838 
839 	while (n) {
840 		struct vmap_area *va;
841 
842 		va = rb_entry(n, struct vmap_area, rb_node);
843 		if (addr < va->va_start)
844 			n = n->rb_left;
845 		else if (addr >= va->va_end)
846 			n = n->rb_right;
847 		else
848 			return va;
849 	}
850 
851 	return NULL;
852 }
853 
854 /*
855  * This function returns the address of the link (a left or right
856  * pointer of the parent node, or the root link if the tree is empty)
857  * where the new node should be attached; the parent node itself
858  * is returned via @parent.
859  *
860  * NULL is returned if the new range overlaps an existing one, which is considered a bug.
861  */
862 static __always_inline struct rb_node **
863 find_va_links(struct vmap_area *va,
864 	struct rb_root *root, struct rb_node *from,
865 	struct rb_node **parent)
866 {
867 	struct vmap_area *tmp_va;
868 	struct rb_node **link;
869 
870 	if (root) {
871 		link = &root->rb_node;
872 		if (unlikely(!*link)) {
873 			*parent = NULL;
874 			return link;
875 		}
876 	} else {
877 		link = &from;
878 	}
879 
880 	/*
881 	 * Go to the bottom of the tree. When we hit the last point
882 	 * we end up with the parent rb_node and the correct direction,
883 	 * named "link" here, where the new va->rb_node will be attached.
884 	 */
885 	do {
886 		tmp_va = rb_entry(*link, struct vmap_area, rb_node);
887 
888 		/*
889 		 * During the traversal we also do some sanity checks.
890 		 * Trigger a warning if there is a partial (left/right)
891 		 * or full overlap.
892 		 */
893 		if (va->va_start < tmp_va->va_end &&
894 				va->va_end <= tmp_va->va_start)
895 			link = &(*link)->rb_left;
896 		else if (va->va_end > tmp_va->va_start &&
897 				va->va_start >= tmp_va->va_end)
898 			link = &(*link)->rb_right;
899 		else {
900 			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
901 				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
902 
903 			return NULL;
904 		}
905 	} while (*link);
906 
907 	*parent = &tmp_va->rb_node;
908 	return link;
909 }
910 
911 static __always_inline struct list_head *
912 get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
913 {
914 	struct list_head *list;
915 
916 	if (unlikely(!parent))
917 		/*
918 		 * The red-black tree where we try to find VA neighbors
919 		 * before merging or inserting is empty, i.e. it means
920 		 * there is no free vmap space. Normally it does not
921 		 * happen but we handle this case anyway.
922 		 */
923 		return NULL;
924 
925 	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
926 	return (&parent->rb_right == link ? list->next : list);
927 }
928 
929 static __always_inline void
930 link_va(struct vmap_area *va, struct rb_root *root,
931 	struct rb_node *parent, struct rb_node **link, struct list_head *head)
932 {
933 	/*
934 	 * VA is still not in the list, but we can
935 	 * identify its future previous list_head node.
936 	 */
937 	if (likely(parent)) {
938 		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
939 		if (&parent->rb_right != link)
940 			head = head->prev;
941 	}
942 
943 	/* Insert to the rb-tree */
944 	rb_link_node(&va->rb_node, parent, link);
945 	if (root == &free_vmap_area_root) {
946 		/*
947 		 * Some explanation here. Just perform simple insertion
948 		 * to the tree. We do not set va->subtree_max_size to
949 		 * its current size before calling rb_insert_augmented().
950 		 * This is because we populate the tree from the bottom
951 		 * up to the parent levels once the node _is_ in the tree.
952 		 *
953 		 * Therefore we set subtree_max_size to zero after insertion,
954 		 * so that __augment_tree_propagate_from() can put everything
955 		 * into the correct order later on.
956 		 */
957 		rb_insert_augmented(&va->rb_node,
958 			root, &free_vmap_area_rb_augment_cb);
959 		va->subtree_max_size = 0;
960 	} else {
961 		rb_insert_color(&va->rb_node, root);
962 	}
963 
964 	/* Address-sort this list */
965 	list_add(&va->list, head);
966 }
967 
968 static __always_inline void
969 unlink_va(struct vmap_area *va, struct rb_root *root)
970 {
971 	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
972 		return;
973 
974 	if (root == &free_vmap_area_root)
975 		rb_erase_augmented(&va->rb_node,
976 			root, &free_vmap_area_rb_augment_cb);
977 	else
978 		rb_erase(&va->rb_node, root);
979 
980 	list_del(&va->list);
981 	RB_CLEAR_NODE(&va->rb_node);
982 }
983 
984 #if DEBUG_AUGMENT_PROPAGATE_CHECK
985 static void
986 augment_tree_propagate_check(void)
987 {
988 	struct vmap_area *va;
989 	unsigned long computed_size;
990 
991 	list_for_each_entry(va, &free_vmap_area_list, list) {
992 		computed_size = compute_subtree_max_size(va);
993 		if (computed_size != va->subtree_max_size)
994 			pr_emerg("tree is corrupted: %lu, %lu\n",
995 				va_size(va), va->subtree_max_size);
996 	}
997 }
998 #endif
999 
1000 /*
1001  * This function populates subtree_max_size from bottom to upper
1002  * levels starting from the VA point. The propagation must be done
1003  * when the VA size is modified by changing its va_start/va_end,
1004  * or when a new VA is inserted into the tree.
1005  *
1006  * It means that __augment_tree_propagate_from() must be called:
1007  * - After VA has been inserted to the tree(free path);
1008  * - After VA has been shrunk(allocation path);
1009  * - After VA has been increased(merging path).
1010  *
1011  * Please note that this does not mean that upper parent nodes
1012  * and their subtree_max_size are recalculated all the time up
1013  * to the root node.
1014  *
1015  *       4--8
1016  *        /\
1017  *       /  \
1018  *      /    \
1019  *    2--2  8--8
1020  *
1021  * For example, if we modify node 4, shrinking it to 2, then
1022  * no modification is required at all. If we shrink node 2 to 1,
1023  * only its subtree_max_size is updated and set to 1. If we shrink
1024  * node 8 to 6, then its subtree_max_size is set to 6 and the parent
1025  * node becomes 4--6.
1026  */
1027 static __always_inline void
1028 augment_tree_propagate_from(struct vmap_area *va)
1029 {
1030 	/*
1031 	 * Populate the tree from bottom towards the root until
1032 	 * the calculated maximum available size of checked node
1033 	 * is equal to its current one.
1034 	 */
1035 	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
1036 
1037 #if DEBUG_AUGMENT_PROPAGATE_CHECK
1038 	augment_tree_propagate_check();
1039 #endif
1040 }
1041 
1042 static void
1043 insert_vmap_area(struct vmap_area *va,
1044 	struct rb_root *root, struct list_head *head)
1045 {
1046 	struct rb_node **link;
1047 	struct rb_node *parent;
1048 
1049 	link = find_va_links(va, root, NULL, &parent);
1050 	if (link)
1051 		link_va(va, root, parent, link, head);
1052 }
1053 
1054 static void
1055 insert_vmap_area_augment(struct vmap_area *va,
1056 	struct rb_node *from, struct rb_root *root,
1057 	struct list_head *head)
1058 {
1059 	struct rb_node **link;
1060 	struct rb_node *parent;
1061 
1062 	if (from)
1063 		link = find_va_links(va, NULL, from, &parent);
1064 	else
1065 		link = find_va_links(va, root, NULL, &parent);
1066 
1067 	if (link) {
1068 		link_va(va, root, parent, link, head);
1069 		augment_tree_propagate_from(va);
1070 	}
1071 }
1072 
1073 /*
1074  * Merge a de-allocated chunk of VA memory with the previous
1075  * and next free blocks. If no coalescing is done, a new
1076  * free area is inserted. If the VA has been merged, it is
1077  * freed.
1078  *
1079  * Please note, it can return NULL in case of overlapping
1080  * ranges, accompanied by a WARN() report. Although this is
1081  * buggy behaviour, the system can stay alive and keep
1082  * running.
1083  */
1084 static __always_inline struct vmap_area *
1085 merge_or_add_vmap_area(struct vmap_area *va,
1086 	struct rb_root *root, struct list_head *head)
1087 {
1088 	struct vmap_area *sibling;
1089 	struct list_head *next;
1090 	struct rb_node **link;
1091 	struct rb_node *parent;
1092 	bool merged = false;
1093 
1094 	/*
1095 	 * Find a place in the tree where VA potentially will be
1096 	 * inserted, unless it is merged with its sibling/siblings.
1097 	 */
1098 	link = find_va_links(va, root, NULL, &parent);
1099 	if (!link)
1100 		return NULL;
1101 
1102 	/*
1103 	 * Get next node of VA to check if merging can be done.
1104 	 */
1105 	next = get_va_next_sibling(parent, link);
1106 	if (unlikely(next == NULL))
1107 		goto insert;
1108 
1109 	/*
1110 	 * start            end
1111 	 * |                |
1112 	 * |<------VA------>|<-----Next----->|
1113 	 *                  |                |
1114 	 *                  start            end
1115 	 */
1116 	if (next != head) {
1117 		sibling = list_entry(next, struct vmap_area, list);
1118 		if (sibling->va_start == va->va_end) {
1119 			sibling->va_start = va->va_start;
1120 
1121 			/* Free vmap_area object. */
1122 			kmem_cache_free(vmap_area_cachep, va);
1123 
1124 			/* Point to the new merged area. */
1125 			va = sibling;
1126 			merged = true;
1127 		}
1128 	}
1129 
1130 	/*
1131 	 * start            end
1132 	 * |                |
1133 	 * |<-----Prev----->|<------VA------>|
1134 	 *                  |                |
1135 	 *                  start            end
1136 	 */
1137 	if (next->prev != head) {
1138 		sibling = list_entry(next->prev, struct vmap_area, list);
1139 		if (sibling->va_end == va->va_start) {
1140 			/*
1141 			 * If both neighbors are coalesced, it is important
1142 			 * to unlink the "next" node first, followed by merging
1143 			 * with "previous" one. Otherwise the tree might not be
1144 			 * fully populated if a sibling's augmented value is
1145 			 * "normalized" because of rotation operations.
1146 			 */
1147 			if (merged)
1148 				unlink_va(va, root);
1149 
1150 			sibling->va_end = va->va_end;
1151 
1152 			/* Free vmap_area object. */
1153 			kmem_cache_free(vmap_area_cachep, va);
1154 
1155 			/* Point to the new merged area. */
1156 			va = sibling;
1157 			merged = true;
1158 		}
1159 	}
1160 
1161 insert:
1162 	if (!merged)
1163 		link_va(va, root, parent, link, head);
1164 
1165 	return va;
1166 }
1167 
1168 static __always_inline struct vmap_area *
1169 merge_or_add_vmap_area_augment(struct vmap_area *va,
1170 	struct rb_root *root, struct list_head *head)
1171 {
1172 	va = merge_or_add_vmap_area(va, root, head);
1173 	if (va)
1174 		augment_tree_propagate_from(va);
1175 
1176 	return va;
1177 }
1178 
1179 static __always_inline bool
1180 is_within_this_va(struct vmap_area *va, unsigned long size,
1181 	unsigned long align, unsigned long vstart)
1182 {
1183 	unsigned long nva_start_addr;
1184 
1185 	if (va->va_start > vstart)
1186 		nva_start_addr = ALIGN(va->va_start, align);
1187 	else
1188 		nva_start_addr = ALIGN(vstart, align);
1189 
1190 	/* Can be overflowed due to big size or alignment. */
1191 	if (nva_start_addr + size < nva_start_addr ||
1192 			nva_start_addr < vstart)
1193 		return false;
1194 
1195 	return (nva_start_addr + size <= va->va_end);
1196 }
1197 
1198 /*
1199  * Find the first free block (lowest start address) in the tree
1200  * that satisfies the request described by the passed-in
1201  * parameters.
1202  */
1203 static __always_inline struct vmap_area *
1204 find_vmap_lowest_match(unsigned long size,
1205 	unsigned long align, unsigned long vstart)
1206 {
1207 	struct vmap_area *va;
1208 	struct rb_node *node;
1209 	unsigned long length;
1210 
1211 	/* Start from the root. */
1212 	node = free_vmap_area_root.rb_node;
1213 
1214 	/* Adjust the search size for alignment overhead. */
1215 	length = size + align - 1;
1216 
1217 	while (node) {
1218 		va = rb_entry(node, struct vmap_area, rb_node);
1219 
1220 		if (get_subtree_max_size(node->rb_left) >= length &&
1221 				vstart < va->va_start) {
1222 			node = node->rb_left;
1223 		} else {
1224 			if (is_within_this_va(va, size, align, vstart))
1225 				return va;
1226 
1227 			/*
1228 			 * It does not make sense to go deeper into the right
1229 			 * sub-tree if it does not have a free block that is
1230 			 * equal to or bigger than the requested search length.
1231 			 */
1232 			if (get_subtree_max_size(node->rb_right) >= length) {
1233 				node = node->rb_right;
1234 				continue;
1235 			}
1236 
1237 			/*
1238 			 * OK. We roll back and find the first right sub-tree
1239 			 * that will satisfy the search criteria. It can happen
1240 			 * only once due to the "vstart" restriction.
1241 			 */
1242 			while ((node = rb_parent(node))) {
1243 				va = rb_entry(node, struct vmap_area, rb_node);
1244 				if (is_within_this_va(va, size, align, vstart))
1245 					return va;
1246 
1247 				if (get_subtree_max_size(node->rb_right) >= length &&
1248 						vstart <= va->va_start) {
1249 					node = node->rb_right;
1250 					break;
1251 				}
1252 			}
1253 		}
1254 	}
1255 
1256 	return NULL;
1257 }
1258 
1259 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1260 #include <linux/random.h>
1261 
1262 static struct vmap_area *
1263 find_vmap_lowest_linear_match(unsigned long size,
1264 	unsigned long align, unsigned long vstart)
1265 {
1266 	struct vmap_area *va;
1267 
1268 	list_for_each_entry(va, &free_vmap_area_list, list) {
1269 		if (!is_within_this_va(va, size, align, vstart))
1270 			continue;
1271 
1272 		return va;
1273 	}
1274 
1275 	return NULL;
1276 }
1277 
1278 static void
1279 find_vmap_lowest_match_check(unsigned long size)
1280 {
1281 	struct vmap_area *va_1, *va_2;
1282 	unsigned long vstart;
1283 	unsigned int rnd;
1284 
1285 	get_random_bytes(&rnd, sizeof(rnd));
1286 	vstart = VMALLOC_START + rnd;
1287 
1288 	va_1 = find_vmap_lowest_match(size, 1, vstart);
1289 	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);
1290 
1291 	if (va_1 != va_2)
1292 		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
1293 			va_1, va_2, vstart);
1294 }
1295 #endif
1296 
1297 enum fit_type {
1298 	NOTHING_FIT = 0,
1299 	FL_FIT_TYPE = 1,	/* full fit */
1300 	LE_FIT_TYPE = 2,	/* left edge fit */
1301 	RE_FIT_TYPE = 3,	/* right edge fit */
1302 	NE_FIT_TYPE = 4		/* no edge fit */
1303 };
1304 
1305 static __always_inline enum fit_type
1306 classify_va_fit_type(struct vmap_area *va,
1307 	unsigned long nva_start_addr, unsigned long size)
1308 {
1309 	enum fit_type type;
1310 
1311 	/* Check if it is within VA. */
1312 	if (nva_start_addr < va->va_start ||
1313 			nva_start_addr + size > va->va_end)
1314 		return NOTHING_FIT;
1315 
1316 	/* Now classify. */
1317 	if (va->va_start == nva_start_addr) {
1318 		if (va->va_end == nva_start_addr + size)
1319 			type = FL_FIT_TYPE;
1320 		else
1321 			type = LE_FIT_TYPE;
1322 	} else if (va->va_end == nva_start_addr + size) {
1323 		type = RE_FIT_TYPE;
1324 	} else {
1325 		type = NE_FIT_TYPE;
1326 	}
1327 
1328 	return type;
1329 }
1330 
1331 static __always_inline int
1332 adjust_va_to_fit_type(struct vmap_area *va,
1333 	unsigned long nva_start_addr, unsigned long size,
1334 	enum fit_type type)
1335 {
1336 	struct vmap_area *lva = NULL;
1337 
1338 	if (type == FL_FIT_TYPE) {
1339 		/*
1340 		 * No need to split VA, it fully fits.
1341 		 *
1342 		 * |               |
1343 		 * V      NVA      V
1344 		 * |---------------|
1345 		 */
1346 		unlink_va(va, &free_vmap_area_root);
1347 		kmem_cache_free(vmap_area_cachep, va);
1348 	} else if (type == LE_FIT_TYPE) {
1349 		/*
1350 		 * Split left edge of fit VA.
1351 		 *
1352 		 * |       |
1353 		 * V  NVA  V   R
1354 		 * |-------|-------|
1355 		 */
1356 		va->va_start += size;
1357 	} else if (type == RE_FIT_TYPE) {
1358 		/*
1359 		 * Split right edge of fit VA.
1360 		 *
1361 		 *         |       |
1362 		 *     L   V  NVA  V
1363 		 * |-------|-------|
1364 		 */
1365 		va->va_end = nva_start_addr;
1366 	} else if (type == NE_FIT_TYPE) {
1367 		/*
1368 		 * Split no edge of fit VA.
1369 		 *
1370 		 *     |       |
1371 		 *   L V  NVA  V R
1372 		 * |---|-------|---|
1373 		 */
1374 		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
1375 		if (unlikely(!lva)) {
1376 			/*
1377 			 * For the percpu allocator we do not do any pre-allocation
1378 			 * and leave it as it is. The reason is that it most likely
1379 			 * never ends up with NE_FIT_TYPE splitting. In case of
1380 			 * percpu allocations offsets and sizes are aligned to
1381 			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
1382 			 * are its main fitting cases.
1383 			 *
1384 			 * There are a few exceptions though; one example is the
1385 			 * first allocation (early boot up) when we have "one"
1386 			 * big free space that has to be split.
1387 			 *
1388 			 * Also we can hit this path in case of regular "vmap"
1389 			 * allocations, if "this" current CPU was not preloaded.
1390 			 * See the comment in alloc_vmap_area() why. If so, then
1391 			 * GFP_NOWAIT is used instead to get an extra object for
1392 			 * split purpose. That is rare and most of the time it
1393 			 * does not occur.
1394 			 *
1395 			 * What happens if an allocation fails? Basically,
1396 			 * an "overflow" path is triggered to purge lazily freed
1397 			 * areas to free some memory, then, the "retry" path is
1398 			 * triggered to repeat one more time. See more details
1399 			 * in alloc_vmap_area() function.
1400 			 */
1401 			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
1402 			if (!lva)
1403 				return -1;
1404 		}
1405 
1406 		/*
1407 		 * Build the remainder.
1408 		 */
1409 		lva->va_start = va->va_start;
1410 		lva->va_end = nva_start_addr;
1411 
1412 		/*
1413 		 * Shrink this VA to remaining size.
1414 		 */
1415 		va->va_start = nva_start_addr + size;
1416 	} else {
1417 		return -1;
1418 	}
1419 
1420 	if (type != FL_FIT_TYPE) {
1421 		augment_tree_propagate_from(va);
1422 
1423 		if (lva)	/* type == NE_FIT_TYPE */
1424 			insert_vmap_area_augment(lva, &va->rb_node,
1425 				&free_vmap_area_root, &free_vmap_area_list);
1426 	}
1427 
1428 	return 0;
1429 }
1430 
1431 /*
1432  * Returns the start address of the newly allocated area on success.
1433  * Otherwise "vend" is returned, which indicates failure.
1434  */
1435 static __always_inline unsigned long
1436 __alloc_vmap_area(unsigned long size, unsigned long align,
1437 	unsigned long vstart, unsigned long vend)
1438 {
1439 	unsigned long nva_start_addr;
1440 	struct vmap_area *va;
1441 	enum fit_type type;
1442 	int ret;
1443 
1444 	va = find_vmap_lowest_match(size, align, vstart);
1445 	if (unlikely(!va))
1446 		return vend;
1447 
1448 	if (va->va_start > vstart)
1449 		nva_start_addr = ALIGN(va->va_start, align);
1450 	else
1451 		nva_start_addr = ALIGN(vstart, align);
1452 
1453 	/* Check the "vend" restriction. */
1454 	if (nva_start_addr + size > vend)
1455 		return vend;
1456 
1457 	/* Classify what we have found. */
1458 	type = classify_va_fit_type(va, nva_start_addr, size);
1459 	if (WARN_ON_ONCE(type == NOTHING_FIT))
1460 		return vend;
1461 
1462 	/* Update the free vmap_area. */
1463 	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
1464 	if (ret)
1465 		return vend;
1466 
1467 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1468 	find_vmap_lowest_match_check(size);
1469 #endif
1470 
1471 	return nva_start_addr;
1472 }
1473 
1474 /*
1475  * Free a region of KVA allocated by alloc_vmap_area
1476  */
1477 static void free_vmap_area(struct vmap_area *va)
1478 {
1479 	/*
1480 	 * Remove from the busy tree/list.
1481 	 */
1482 	spin_lock(&vmap_area_lock);
1483 	unlink_va(va, &vmap_area_root);
1484 	spin_unlock(&vmap_area_lock);
1485 
1486 	/*
1487 	 * Insert/Merge it back to the free tree/list.
1488 	 */
1489 	spin_lock(&free_vmap_area_lock);
1490 	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1491 	spin_unlock(&free_vmap_area_lock);
1492 }
1493 
1494 static inline void
1495 preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
1496 {
1497 	struct vmap_area *va = NULL;
1498 
1499 	/*
1500 	 * Preload this CPU with one extra vmap_area object. It is used
1501 	 * when the fit type of a free area is NE_FIT_TYPE. It guarantees that
1502 	 * a CPU that does an allocation is preloaded.
1503 	 *
1504 	 * We do it in non-atomic context, thus it allows us to use more
1505 	 * permissive allocation masks to be more stable under low-memory
1506 	 * conditions and high memory pressure.
1507 	 */
1508 	if (!this_cpu_read(ne_fit_preload_node))
1509 		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1510 
1511 	spin_lock(lock);
1512 
1513 	if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
1514 		kmem_cache_free(vmap_area_cachep, va);
1515 }
1516 
1517 /*
1518  * Allocate a region of KVA of the specified size and alignment, within the
1519  * vstart and vend.
1520  */
1521 static struct vmap_area *alloc_vmap_area(unsigned long size,
1522 				unsigned long align,
1523 				unsigned long vstart, unsigned long vend,
1524 				int node, gfp_t gfp_mask)
1525 {
1526 	struct vmap_area *va;
1527 	unsigned long freed;
1528 	unsigned long addr;
1529 	int purged = 0;
1530 	int ret;
1531 
1532 	BUG_ON(!size);
1533 	BUG_ON(offset_in_page(size));
1534 	BUG_ON(!is_power_of_2(align));
1535 
1536 	if (unlikely(!vmap_initialized))
1537 		return ERR_PTR(-EBUSY);
1538 
1539 	might_sleep();
1540 	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
1541 
1542 	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1543 	if (unlikely(!va))
1544 		return ERR_PTR(-ENOMEM);
1545 
1546 	/*
1547 	 * Only scan the relevant parts containing pointers to other objects
1548 	 * to avoid false negatives.
1549 	 */
1550 	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1551 
1552 retry:
1553 	preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
1554 	addr = __alloc_vmap_area(size, align, vstart, vend);
1555 	spin_unlock(&free_vmap_area_lock);
1556 
1557 	/*
1558 	 * If an allocation fails, the "vend" address is
1559 	 * returned. Therefore trigger the overflow path.
1560 	 */
1561 	if (unlikely(addr == vend))
1562 		goto overflow;
1563 
1564 	va->va_start = addr;
1565 	va->va_end = addr + size;
1566 	va->vm = NULL;
1567 
1568 	spin_lock(&vmap_area_lock);
1569 	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1570 	spin_unlock(&vmap_area_lock);
1571 
1572 	BUG_ON(!IS_ALIGNED(va->va_start, align));
1573 	BUG_ON(va->va_start < vstart);
1574 	BUG_ON(va->va_end > vend);
1575 
1576 	ret = kasan_populate_vmalloc(addr, size);
1577 	if (ret) {
1578 		free_vmap_area(va);
1579 		return ERR_PTR(ret);
1580 	}
1581 
1582 	return va;
1583 
1584 overflow:
1585 	if (!purged) {
1586 		purge_vmap_area_lazy();
1587 		purged = 1;
1588 		goto retry;
1589 	}
1590 
1591 	freed = 0;
1592 	blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
1593 
1594 	if (freed > 0) {
1595 		purged = 0;
1596 		goto retry;
1597 	}
1598 
1599 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
1600 		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
1601 			size);
1602 
1603 	kmem_cache_free(vmap_area_cachep, va);
1604 	return ERR_PTR(-EBUSY);
1605 }
1606 
1607 int register_vmap_purge_notifier(struct notifier_block *nb)
1608 {
1609 	return blocking_notifier_chain_register(&vmap_notify_list, nb);
1610 }
1611 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
1612 
1613 int unregister_vmap_purge_notifier(struct notifier_block *nb)
1614 {
1615 	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
1616 }
1617 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
1618 
1619 /*
1620  * lazy_max_pages is the maximum amount of virtual address space we gather up
1621  * before attempting to purge with a TLB flush.
1622  *
1623  * There is a tradeoff here: a larger number will cover more kernel page tables
1624  * and take slightly longer to purge, but it will linearly reduce the number of
1625  * global TLB flushes that must be performed. It would seem natural to scale
1626  * this number up linearly with the number of CPUs (because vmapping activity
1627  * could also scale linearly with the number of CPUs), however it is likely
1628  * that in practice, workloads might be constrained in other ways that mean
1629  * vmap activity will not scale linearly with CPUs. Also, I want to be
1630  * conservative and not introduce a big latency on huge systems, so go with
1631  * a less aggressive log scale. It will still be an improvement over the old
1632  * code, and it will be simple to change the scale factor if we find that it
1633  * becomes a problem on bigger systems.
1634  */
1635 static unsigned long lazy_max_pages(void)
1636 {
1637 	unsigned int log;
1638 
1639 	log = fls(num_online_cpus());
1640 
1641 	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1642 }
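
/*
 * Illustrative example (assumed values, not from this file): with 16
 * online CPUs, fls(16) == 5, so up to 5 * 32MB = 160MB worth of lazily
 * freed vmap space (40960 pages with 4KB pages) may accumulate before a
 * purge and TLB flush is attempted.
 */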
1643 
1644 static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1645 
1646 /*
1647  * Serialize vmap purging.  There is no actual critical section protected
1648  * by this lock, but we want to avoid concurrent calls for performance
1649  * reasons and to make pcpu_get_vm_areas() more deterministic.
1650  */
1651 static DEFINE_MUTEX(vmap_purge_lock);
1652 
1653 /* for per-CPU blocks */
1654 static void purge_fragmented_blocks_allcpus(void);
1655 
1656 #ifdef CONFIG_X86_64
1657 /*
1658  * called before a call to iounmap() if the caller wants vm_area_structs
1659  * immediately freed.
1660  */
1661 void set_iounmap_nonlazy(void)
1662 {
1663 	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
1664 }
1665 #endif /* CONFIG_X86_64 */
1666 
1667 /*
1668  * Purges all lazily-freed vmap areas.
1669  */
1670 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1671 {
1672 	unsigned long resched_threshold;
1673 	struct list_head local_pure_list;
1674 	struct vmap_area *va, *n_va;
1675 
1676 	lockdep_assert_held(&vmap_purge_lock);
1677 
1678 	spin_lock(&purge_vmap_area_lock);
1679 	purge_vmap_area_root = RB_ROOT;
1680 	list_replace_init(&purge_vmap_area_list, &local_pure_list);
1681 	spin_unlock(&purge_vmap_area_lock);
1682 
1683 	if (unlikely(list_empty(&local_pure_list)))
1684 		return false;
1685 
1686 	start = min(start,
1687 		list_first_entry(&local_pure_list,
1688 			struct vmap_area, list)->va_start);
1689 
1690 	end = max(end,
1691 		list_last_entry(&local_pure_list,
1692 			struct vmap_area, list)->va_end);
1693 
1694 	flush_tlb_kernel_range(start, end);
1695 	resched_threshold = lazy_max_pages() << 1;
1696 
1697 	spin_lock(&free_vmap_area_lock);
1698 	list_for_each_entry_safe(va, n_va, &local_pure_list, list) {
1699 		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1700 		unsigned long orig_start = va->va_start;
1701 		unsigned long orig_end = va->va_end;
1702 
1703 		/*
1704 		 * Finally insert or merge lazily-freed area. It is
1705 		 * detached and there is no need to "unlink" it from
1706 		 * anything.
1707 		 */
1708 		va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
1709 				&free_vmap_area_list);
1710 
1711 		if (!va)
1712 			continue;
1713 
1714 		if (is_vmalloc_or_module_addr((void *)orig_start))
1715 			kasan_release_vmalloc(orig_start, orig_end,
1716 					      va->va_start, va->va_end);
1717 
1718 		atomic_long_sub(nr, &vmap_lazy_nr);
1719 
1720 		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1721 			cond_resched_lock(&free_vmap_area_lock);
1722 	}
1723 	spin_unlock(&free_vmap_area_lock);
1724 	return true;
1725 }
1726 
1727 /*
1728  * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
1729  * is already purging.
1730  */
1731 static void try_purge_vmap_area_lazy(void)
1732 {
1733 	if (mutex_trylock(&vmap_purge_lock)) {
1734 		__purge_vmap_area_lazy(ULONG_MAX, 0);
1735 		mutex_unlock(&vmap_purge_lock);
1736 	}
1737 }
1738 
1739 /*
1740  * Kick off a purge of the outstanding lazy areas.
1741  */
1742 static void purge_vmap_area_lazy(void)
1743 {
1744 	mutex_lock(&vmap_purge_lock);
1745 	purge_fragmented_blocks_allcpus();
1746 	__purge_vmap_area_lazy(ULONG_MAX, 0);
1747 	mutex_unlock(&vmap_purge_lock);
1748 }
1749 
1750 /*
1751  * Free a vmap area, with the caller ensuring that the area has been
1752  * unmapped and that flush_cache_vunmap() has been called for the
1753  * correct range previously.
1754  */
1755 static void free_vmap_area_noflush(struct vmap_area *va)
1756 {
1757 	unsigned long nr_lazy;
1758 
1759 	spin_lock(&vmap_area_lock);
1760 	unlink_va(va, &vmap_area_root);
1761 	spin_unlock(&vmap_area_lock);
1762 
1763 	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1764 				PAGE_SHIFT, &vmap_lazy_nr);
1765 
1766 	/*
1767 	 * Merge or place it to the purge tree/list.
1768 	 */
1769 	spin_lock(&purge_vmap_area_lock);
1770 	merge_or_add_vmap_area(va,
1771 		&purge_vmap_area_root, &purge_vmap_area_list);
1772 	spin_unlock(&purge_vmap_area_lock);
1773 
1774 	/* After this point, we may free va at any time */
1775 	if (unlikely(nr_lazy > lazy_max_pages()))
1776 		try_purge_vmap_area_lazy();
1777 }
1778 
1779 /*
1780  * Free and unmap a vmap area
1781  */
1782 static void free_unmap_vmap_area(struct vmap_area *va)
1783 {
1784 	flush_cache_vunmap(va->va_start, va->va_end);
1785 	vunmap_range_noflush(va->va_start, va->va_end);
1786 	if (debug_pagealloc_enabled_static())
1787 		flush_tlb_kernel_range(va->va_start, va->va_end);
1788 
1789 	free_vmap_area_noflush(va);
1790 }
1791 
1792 static struct vmap_area *find_vmap_area(unsigned long addr)
1793 {
1794 	struct vmap_area *va;
1795 
1796 	spin_lock(&vmap_area_lock);
1797 	va = __find_vmap_area(addr);
1798 	spin_unlock(&vmap_area_lock);
1799 
1800 	return va;
1801 }
1802 
1803 /*** Per cpu kva allocator ***/
1804 
1805 /*
1806  * vmap space is limited especially on 32 bit architectures. Ensure there is
1807  * room for at least 16 percpu vmap blocks per CPU.
1808  */
1809 /*
1810  * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
1811  * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Since they
1812  * are not constant, guess instead (we just need a rough idea).
1813  */
1814 #if BITS_PER_LONG == 32
1815 #define VMALLOC_SPACE		(128UL*1024*1024)
1816 #else
1817 #define VMALLOC_SPACE		(128UL*1024*1024*1024)
1818 #endif
1819 
1820 #define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
1821 #define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
1822 #define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
1823 #define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
1824 #define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
1825 #define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
1826 #define VMAP_BBMAP_BITS		\
1827 		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
1828 		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
1829 			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1830 
1831 #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
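/*
 * Worked example (added for illustration): on a 64-bit machine with 4 KiB
 * pages and NR_CPUS == 64, VMALLOC_PAGES is 128 GiB / 4 KiB == 32M pages,
 * so VMALLOC_PAGES / 64 / 16 == 32768 bits. That exceeds
 * VMAP_BBMAP_BITS_MAX, so VMAP_BBMAP_BITS is clamped to 1024 and
 * VMAP_BLOCK_SIZE works out to 1024 * 4 KiB == 4 MiB per vmap block.
 */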
1832 
1833 struct vmap_block_queue {
1834 	spinlock_t lock;
1835 	struct list_head free;
1836 };
1837 
1838 struct vmap_block {
1839 	spinlock_t lock;
1840 	struct vmap_area *va;
1841 	unsigned long free, dirty;
1842 	unsigned long dirty_min, dirty_max; /*< dirty range */
1843 	struct list_head free_list;
1844 	struct rcu_head rcu_head;
1845 	struct list_head purge;
1846 };
1847 
1848 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
1849 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1850 
1851 /*
1852  * XArray of vmap blocks, indexed by address, to quickly find a vmap block
1853  * in the free path. Could get rid of this if we change the API to return a
1854  * "cookie" from alloc, to be passed to free. But no big deal yet.
1855  */
1856 static DEFINE_XARRAY(vmap_blocks);
1857 
1858 /*
1859  * We should probably have a fallback mechanism to allocate virtual memory
1860  * out of partially filled vmap blocks. However vmap block sizing should be
1861  * fairly reasonable according to the vmalloc size, so it shouldn't be a
1862  * big problem.
1863  */
1864 
1865 static unsigned long addr_to_vb_idx(unsigned long addr)
1866 {
1867 	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
1868 	addr /= VMAP_BLOCK_SIZE;
1869 	return addr;
1870 }
1871 
1872 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
1873 {
1874 	unsigned long addr;
1875 
1876 	addr = va_start + (pages_off << PAGE_SHIFT);
1877 	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
1878 	return (void *)addr;
1879 }
1880 
1881 /**
1882  * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in it
1883  *                  (the number of pages cannot exceed VMAP_BBMAP_BITS)
1884  * @order:    allocation order; 2^order pages are occupied in the new block
1885  * @gfp_mask: flags for the page level allocator
1886  *
1887  * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1888  */
1889 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
1890 {
1891 	struct vmap_block_queue *vbq;
1892 	struct vmap_block *vb;
1893 	struct vmap_area *va;
1894 	unsigned long vb_idx;
1895 	int node, err;
1896 	void *vaddr;
1897 
1898 	node = numa_node_id();
1899 
1900 	vb = kmalloc_node(sizeof(struct vmap_block),
1901 			gfp_mask & GFP_RECLAIM_MASK, node);
1902 	if (unlikely(!vb))
1903 		return ERR_PTR(-ENOMEM);
1904 
1905 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1906 					VMALLOC_START, VMALLOC_END,
1907 					node, gfp_mask);
1908 	if (IS_ERR(va)) {
1909 		kfree(vb);
1910 		return ERR_CAST(va);
1911 	}
1912 
1913 	vaddr = vmap_block_vaddr(va->va_start, 0);
1914 	spin_lock_init(&vb->lock);
1915 	vb->va = va;
1916 	/* At least something should be left free */
1917 	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
1918 	vb->free = VMAP_BBMAP_BITS - (1UL << order);
1919 	vb->dirty = 0;
1920 	vb->dirty_min = VMAP_BBMAP_BITS;
1921 	vb->dirty_max = 0;
1922 	INIT_LIST_HEAD(&vb->free_list);
1923 
1924 	vb_idx = addr_to_vb_idx(va->va_start);
1925 	err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
1926 	if (err) {
1927 		kfree(vb);
1928 		free_vmap_area(va);
1929 		return ERR_PTR(err);
1930 	}
1931 
1932 	vbq = &get_cpu_var(vmap_block_queue);
1933 	spin_lock(&vbq->lock);
1934 	list_add_tail_rcu(&vb->free_list, &vbq->free);
1935 	spin_unlock(&vbq->lock);
1936 	put_cpu_var(vmap_block_queue);
1937 
1938 	return vaddr;
1939 }
1940 
1941 static void free_vmap_block(struct vmap_block *vb)
1942 {
1943 	struct vmap_block *tmp;
1944 
1945 	tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
1946 	BUG_ON(tmp != vb);
1947 
1948 	free_vmap_area_noflush(vb->va);
1949 	kfree_rcu(vb, rcu_head);
1950 }
1951 
1952 static void purge_fragmented_blocks(int cpu)
1953 {
1954 	LIST_HEAD(purge);
1955 	struct vmap_block *vb;
1956 	struct vmap_block *n_vb;
1957 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1958 
1959 	rcu_read_lock();
1960 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1961 
1962 		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
1963 			continue;
1964 
1965 		spin_lock(&vb->lock);
1966 		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
1967 			vb->free = 0; /* prevent further allocs after releasing lock */
1968 			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
1969 			vb->dirty_min = 0;
1970 			vb->dirty_max = VMAP_BBMAP_BITS;
1971 			spin_lock(&vbq->lock);
1972 			list_del_rcu(&vb->free_list);
1973 			spin_unlock(&vbq->lock);
1974 			spin_unlock(&vb->lock);
1975 			list_add_tail(&vb->purge, &purge);
1976 		} else
1977 			spin_unlock(&vb->lock);
1978 	}
1979 	rcu_read_unlock();
1980 
1981 	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
1982 		list_del(&vb->purge);
1983 		free_vmap_block(vb);
1984 	}
1985 }
1986 
1987 static void purge_fragmented_blocks_allcpus(void)
1988 {
1989 	int cpu;
1990 
1991 	for_each_possible_cpu(cpu)
1992 		purge_fragmented_blocks(cpu);
1993 }
1994 
1995 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
1996 {
1997 	struct vmap_block_queue *vbq;
1998 	struct vmap_block *vb;
1999 	void *vaddr = NULL;
2000 	unsigned int order;
2001 
2002 	BUG_ON(offset_in_page(size));
2003 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2004 	if (WARN_ON(size == 0)) {
2005 		/*
2006 		 * Allocating 0 bytes isn't what the caller wants, since
2007 		 * get_order(0) returns a bogus result. Just warn and
2008 		 * bail out early.
2009 		 */
2010 		return NULL;
2011 	}
2012 	order = get_order(size);
2013 
2014 	rcu_read_lock();
2015 	vbq = &get_cpu_var(vmap_block_queue);
2016 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2017 		unsigned long pages_off;
2018 
2019 		spin_lock(&vb->lock);
2020 		if (vb->free < (1UL << order)) {
2021 			spin_unlock(&vb->lock);
2022 			continue;
2023 		}
2024 
2025 		pages_off = VMAP_BBMAP_BITS - vb->free;
2026 		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
2027 		vb->free -= 1UL << order;
2028 		if (vb->free == 0) {
2029 			spin_lock(&vbq->lock);
2030 			list_del_rcu(&vb->free_list);
2031 			spin_unlock(&vbq->lock);
2032 		}
2033 
2034 		spin_unlock(&vb->lock);
2035 		break;
2036 	}
2037 
2038 	put_cpu_var(vmap_block_queue);
2039 	rcu_read_unlock();
2040 
2041 	/* Allocate new block if nothing was found */
2042 	if (!vaddr)
2043 		vaddr = new_vmap_block(order, gfp_mask);
2044 
2045 	return vaddr;
2046 }
2047 
2048 static void vb_free(unsigned long addr, unsigned long size)
2049 {
2050 	unsigned long offset;
2051 	unsigned int order;
2052 	struct vmap_block *vb;
2053 
2054 	BUG_ON(offset_in_page(size));
2055 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2056 
2057 	flush_cache_vunmap(addr, addr + size);
2058 
2059 	order = get_order(size);
2060 	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
2061 	vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
2062 
2063 	vunmap_range_noflush(addr, addr + size);
2064 
2065 	if (debug_pagealloc_enabled_static())
2066 		flush_tlb_kernel_range(addr, addr + size);
2067 
2068 	spin_lock(&vb->lock);
2069 
2070 	/* Expand dirty range */
2071 	vb->dirty_min = min(vb->dirty_min, offset);
2072 	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2073 
2074 	vb->dirty += 1UL << order;
2075 	if (vb->dirty == VMAP_BBMAP_BITS) {
2076 		BUG_ON(vb->free);
2077 		spin_unlock(&vb->lock);
2078 		free_vmap_block(vb);
2079 	} else
2080 		spin_unlock(&vb->lock);
2081 }
2082 
2083 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2084 {
2085 	int cpu;
2086 
2087 	if (unlikely(!vmap_initialized))
2088 		return;
2089 
2090 	might_sleep();
2091 
2092 	for_each_possible_cpu(cpu) {
2093 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2094 		struct vmap_block *vb;
2095 
2096 		rcu_read_lock();
2097 		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2098 			spin_lock(&vb->lock);
2099 			if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
2100 				unsigned long va_start = vb->va->va_start;
2101 				unsigned long s, e;
2102 
2103 				s = va_start + (vb->dirty_min << PAGE_SHIFT);
2104 				e = va_start + (vb->dirty_max << PAGE_SHIFT);
2105 
2106 				start = min(s, start);
2107 				end   = max(e, end);
2108 
2109 				flush = 1;
2110 			}
2111 			spin_unlock(&vb->lock);
2112 		}
2113 		rcu_read_unlock();
2114 	}
2115 
2116 	mutex_lock(&vmap_purge_lock);
2117 	purge_fragmented_blocks_allcpus();
2118 	if (!__purge_vmap_area_lazy(start, end) && flush)
2119 		flush_tlb_kernel_range(start, end);
2120 	mutex_unlock(&vmap_purge_lock);
2121 }
2122 
2123 /**
2124  * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2125  *
2126  * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2127  * to amortize TLB flushing overheads. This means that any page you have
2128  * now may, in a former life, have been mapped into a kernel virtual
2129  * address by the vmap layer, and so some CPUs may still hold TLB entries
2130  * referencing that page (in addition to the regular 1:1 kernel mapping).
2131  *
2132  * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2133  * be sure that none of the pages we have control over will have any aliases
2134  * from the vmap layer.
2135  */
2136 void vm_unmap_aliases(void)
2137 {
2138 	unsigned long start = ULONG_MAX, end = 0;
2139 	int flush = 0;
2140 
2141 	_vm_unmap_aliases(start, end, flush);
2142 }
2143 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
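/*
 * Illustrative sketch (not part of the original file): a caller that has
 * just changed direct-map permissions typically flushes lazy vmap aliases
 * first, so no stale writable alias of the page can survive in any TLB.
 * The helper name below is hypothetical.
 */
#if 0	/* example only */
static void example_protect_page(struct page *page)
{
	set_memory_ro((unsigned long)page_address(page), 1);
	/* Kill any lazily kept vmap aliases before relying on RO. */
	vm_unmap_aliases();
}
#endif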
2144 
2145 /**
2146  * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2147  * @mem: the pointer returned by vm_map_ram
2148  * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2149  */
2150 void vm_unmap_ram(const void *mem, unsigned int count)
2151 {
2152 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
2153 	unsigned long addr = (unsigned long)kasan_reset_tag(mem);
2154 	struct vmap_area *va;
2155 
2156 	might_sleep();
2157 	BUG_ON(!addr);
2158 	BUG_ON(addr < VMALLOC_START);
2159 	BUG_ON(addr > VMALLOC_END);
2160 	BUG_ON(!PAGE_ALIGNED(addr));
2161 
2162 	kasan_poison_vmalloc(mem, size);
2163 
2164 	if (likely(count <= VMAP_MAX_ALLOC)) {
2165 		debug_check_no_locks_freed(mem, size);
2166 		vb_free(addr, size);
2167 		return;
2168 	}
2169 
2170 	va = find_vmap_area(addr);
2171 	BUG_ON(!va);
2172 	debug_check_no_locks_freed((void *)va->va_start,
2173 				    (va->va_end - va->va_start));
2174 	free_unmap_vmap_area(va);
2175 }
2176 EXPORT_SYMBOL(vm_unmap_ram);
2177 
2178 /**
2179  * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2180  * @pages: an array of pointers to the pages to be mapped
2181  * @count: number of pages
2182  * @node: prefer to allocate data structures on this node
2183  *
2184  * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
2185  * faster than vmap().  But if you mix long-lived and short-lived objects
2186  * with vm_map_ram(), it can consume a lot of address space through
2187  * fragmentation (especially on a 32bit machine), and you may eventually
2188  * see allocation failures.  Please use this function for short-lived objects.
2189  *
2190  * Returns: a pointer to the address that has been mapped, or %NULL on failure
2191  */
2192 void *vm_map_ram(struct page **pages, unsigned int count, int node)
2193 {
2194 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
2195 	unsigned long addr;
2196 	void *mem;
2197 
2198 	if (likely(count <= VMAP_MAX_ALLOC)) {
2199 		mem = vb_alloc(size, GFP_KERNEL);
2200 		if (IS_ERR(mem))
2201 			return NULL;
2202 		addr = (unsigned long)mem;
2203 	} else {
2204 		struct vmap_area *va;
2205 		va = alloc_vmap_area(size, PAGE_SIZE,
2206 				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
2207 		if (IS_ERR(va))
2208 			return NULL;
2209 
2210 		addr = va->va_start;
2211 		mem = (void *)addr;
2212 	}
2213 
2214 	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
2215 				pages, PAGE_SHIFT) < 0) {
2216 		vm_unmap_ram(mem, count);
2217 		return NULL;
2218 	}
2219 
2220 	/*
2221 	 * Mark the pages as accessible, now that they are mapped.
2222 	 * With hardware tag-based KASAN, marking is skipped for
2223 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
2224 	 */
2225 	mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
2226 
2227 	return mem;
2228 }
2229 EXPORT_SYMBOL(vm_map_ram);
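/*
 * Illustrative sketch (not part of the original file): a typical short-lived
 * vm_map_ram()/vm_unmap_ram() pairing. The function and variable names are
 * hypothetical and error handling is minimal.
 */
#if 0	/* example only */
static void example_copy_from_pages(struct page **pages, unsigned int count,
				    void *dst, size_t len)
{
	void *vaddr = vm_map_ram(pages, count, NUMA_NO_NODE);

	if (!vaddr)
		return;
	memcpy(dst, vaddr, len);
	/* Must pass the same @count that was used for the mapping. */
	vm_unmap_ram(vaddr, count);
}
#endif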
2230 
2231 static struct vm_struct *vmlist __initdata;
2232 
2233 static inline unsigned int vm_area_page_order(struct vm_struct *vm)
2234 {
2235 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2236 	return vm->page_order;
2237 #else
2238 	return 0;
2239 #endif
2240 }
2241 
2242 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
2243 {
2244 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2245 	vm->page_order = order;
2246 #else
2247 	BUG_ON(order != 0);
2248 #endif
2249 }
2250 
2251 /**
2252  * vm_area_add_early - add vmap area early during boot
2253  * @vm: vm_struct to add
2254  *
2255  * This function is used to add a fixed kernel vm area to vmlist before
2256  * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
2257  * should contain proper values and the other fields should be zero.
2258  *
2259  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2260  */
2261 void __init vm_area_add_early(struct vm_struct *vm)
2262 {
2263 	struct vm_struct *tmp, **p;
2264 
2265 	BUG_ON(vmap_initialized);
2266 	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
2267 		if (tmp->addr >= vm->addr) {
2268 			BUG_ON(tmp->addr < vm->addr + vm->size);
2269 			break;
2270 		} else
2271 			BUG_ON(tmp->addr + tmp->size > vm->addr);
2272 	}
2273 	vm->next = *p;
2274 	*p = vm;
2275 }
2276 
2277 /**
2278  * vm_area_register_early - register vmap area early during boot
2279  * @vm: vm_struct to register
2280  * @align: requested alignment
2281  *
2282  * This function is used to register a kernel vm area before
2283  * vmalloc_init() is called.  @vm->size and @vm->flags should contain
2284  * proper values on entry and other fields should be zero.  On return,
2285  * vm->addr contains the allocated address.
2286  *
2287  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2288  */
2289 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
2290 {
2291 	static size_t vm_init_off __initdata;
2292 	unsigned long addr;
2293 
2294 	addr = ALIGN(VMALLOC_START + vm_init_off, align);
2295 	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
2296 
2297 	vm->addr = (void *)addr;
2298 
2299 	vm_area_add_early(vm);
2300 	kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
2301 }
2302 
2303 static void vmap_init_free_space(void)
2304 {
2305 	unsigned long vmap_start = 1;
2306 	const unsigned long vmap_end = ULONG_MAX;
2307 	struct vmap_area *busy, *free;
2308 
2309 	/*
2310 	 *     B     F     B     B     B     F
2311 	 * -|-----|.....|-----|-----|-----|.....|-
2312 	 *  |           The KVA space           |
2313 	 *  |<--------------------------------->|
2314 	 */
2315 	list_for_each_entry(busy, &vmap_area_list, list) {
2316 		if (busy->va_start - vmap_start > 0) {
2317 			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2318 			if (!WARN_ON_ONCE(!free)) {
2319 				free->va_start = vmap_start;
2320 				free->va_end = busy->va_start;
2321 
2322 				insert_vmap_area_augment(free, NULL,
2323 					&free_vmap_area_root,
2324 						&free_vmap_area_list);
2325 			}
2326 		}
2327 
2328 		vmap_start = busy->va_end;
2329 	}
2330 
2331 	if (vmap_end - vmap_start > 0) {
2332 		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2333 		if (!WARN_ON_ONCE(!free)) {
2334 			free->va_start = vmap_start;
2335 			free->va_end = vmap_end;
2336 
2337 			insert_vmap_area_augment(free, NULL,
2338 				&free_vmap_area_root,
2339 					&free_vmap_area_list);
2340 		}
2341 	}
2342 }
2343 
2344 void __init vmalloc_init(void)
2345 {
2346 	struct vmap_area *va;
2347 	struct vm_struct *tmp;
2348 	int i;
2349 
2350 	/*
2351 	 * Create the cache for vmap_area objects.
2352 	 */
2353 	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
2354 
2355 	for_each_possible_cpu(i) {
2356 		struct vmap_block_queue *vbq;
2357 		struct vfree_deferred *p;
2358 
2359 		vbq = &per_cpu(vmap_block_queue, i);
2360 		spin_lock_init(&vbq->lock);
2361 		INIT_LIST_HEAD(&vbq->free);
2362 		p = &per_cpu(vfree_deferred, i);
2363 		init_llist_head(&p->list);
2364 		INIT_WORK(&p->wq, free_work);
2365 	}
2366 
2367 	/* Import existing vmlist entries. */
2368 	for (tmp = vmlist; tmp; tmp = tmp->next) {
2369 		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2370 		if (WARN_ON_ONCE(!va))
2371 			continue;
2372 
2373 		va->va_start = (unsigned long)tmp->addr;
2374 		va->va_end = va->va_start + tmp->size;
2375 		va->vm = tmp;
2376 		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
2377 	}
2378 
2379 	/*
2380 	 * Now we can initialize a free vmap space.
2381 	 */
2382 	vmap_init_free_space();
2383 	vmap_initialized = true;
2384 }
2385 
2386 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2387 	struct vmap_area *va, unsigned long flags, const void *caller)
2388 {
2389 	vm->flags = flags;
2390 	vm->addr = (void *)va->va_start;
2391 	vm->size = va->va_end - va->va_start;
2392 	vm->caller = caller;
2393 	va->vm = vm;
2394 }
2395 
2396 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2397 			      unsigned long flags, const void *caller)
2398 {
2399 	spin_lock(&vmap_area_lock);
2400 	setup_vmalloc_vm_locked(vm, va, flags, caller);
2401 	spin_unlock(&vmap_area_lock);
2402 }
2403 
2404 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2405 {
2406 	/*
2407 	 * Before removing VM_UNINITIALIZED,
2408 	 * we should make sure that vm has proper values.
2409 	 * Pair with smp_rmb() in show_numa_info().
2410 	 */
2411 	smp_wmb();
2412 	vm->flags &= ~VM_UNINITIALIZED;
2413 }
2414 
2415 static struct vm_struct *__get_vm_area_node(unsigned long size,
2416 		unsigned long align, unsigned long shift, unsigned long flags,
2417 		unsigned long start, unsigned long end, int node,
2418 		gfp_t gfp_mask, const void *caller)
2419 {
2420 	struct vmap_area *va;
2421 	struct vm_struct *area;
2422 	unsigned long requested_size = size;
2423 
2424 	BUG_ON(in_interrupt());
2425 	size = ALIGN(size, 1ul << shift);
2426 	if (unlikely(!size))
2427 		return NULL;
2428 
2429 	if (flags & VM_IOREMAP)
2430 		align = 1ul << clamp_t(int, get_count_order_long(size),
2431 				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
2432 
2433 	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2434 	if (unlikely(!area))
2435 		return NULL;
2436 
2437 	if (!(flags & VM_NO_GUARD))
2438 		size += PAGE_SIZE;
2439 
2440 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2441 	if (IS_ERR(va)) {
2442 		kfree(area);
2443 		return NULL;
2444 	}
2445 
2446 	setup_vmalloc_vm(area, va, flags, caller);
2447 
2448 	/*
2449 	 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
2450 	 * best-effort approach, as they can be mapped outside of vmalloc code.
2451 	 * For VM_ALLOC mappings, the pages are marked as accessible after
2452 	 * getting mapped in __vmalloc_node_range().
2453 	 * With hardware tag-based KASAN, marking is skipped for
2454 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
2455 	 */
2456 	if (!(flags & VM_ALLOC))
2457 		area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
2458 						    KASAN_VMALLOC_PROT_NORMAL);
2459 
2460 	return area;
2461 }
2462 
2463 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2464 				       unsigned long start, unsigned long end,
2465 				       const void *caller)
2466 {
2467 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
2468 				  NUMA_NO_NODE, GFP_KERNEL, caller);
2469 }
2470 
2471 /**
2472  * get_vm_area - reserve a contiguous kernel virtual area
2473  * @size:	 size of the area
2474  * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
2475  *
2476  * Search for an area of @size in the kernel virtual mapping area,
2477  * and reserve it for our purposes.
2479  *
2480  * Return: the area descriptor on success or %NULL on failure.
2481  */
2482 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2483 {
2484 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2485 				  VMALLOC_START, VMALLOC_END,
2486 				  NUMA_NO_NODE, GFP_KERNEL,
2487 				  __builtin_return_address(0));
2488 }
2489 
2490 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2491 				const void *caller)
2492 {
2493 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2494 				  VMALLOC_START, VMALLOC_END,
2495 				  NUMA_NO_NODE, GFP_KERNEL, caller);
2496 }
2497 
2498 /**
2499  * find_vm_area - find a contiguous kernel virtual area
2500  * @addr:	  base address
2501  *
2502  * Search for the kernel VM area starting at @addr, and return it.
2503  * It is up to the caller to do all required locking to keep the returned
2504  * pointer valid.
2505  *
2506  * Return: the area descriptor on success or %NULL on failure.
2507  */
2508 struct vm_struct *find_vm_area(const void *addr)
2509 {
2510 	struct vmap_area *va;
2511 
2512 	va = find_vmap_area((unsigned long)addr);
2513 	if (!va)
2514 		return NULL;
2515 
2516 	return va->vm;
2517 }
2518 EXPORT_SYMBOL_GPL(find_vm_area);
2519 
2520 /**
2521  * remove_vm_area - find and remove a contiguous kernel virtual area
2522  * @addr:	    base address
2523  *
2524  * Search for the kernel VM area starting at @addr, and remove it.
2525  * This function returns the found VM area, but using it is NOT safe
2526  * on SMP machines, except for its size or flags.
2527  *
2528  * Return: the area descriptor on success or %NULL on failure.
2529  */
2530 struct vm_struct *remove_vm_area(const void *addr)
2531 {
2532 	struct vmap_area *va;
2533 
2534 	might_sleep();
2535 
2536 	spin_lock(&vmap_area_lock);
2537 	va = __find_vmap_area((unsigned long)addr);
2538 	if (va && va->vm) {
2539 		struct vm_struct *vm = va->vm;
2540 
2541 		va->vm = NULL;
2542 		spin_unlock(&vmap_area_lock);
2543 
2544 		kasan_free_module_shadow(vm);
2545 		free_unmap_vmap_area(va);
2546 
2547 		return vm;
2548 	}
2549 
2550 	spin_unlock(&vmap_area_lock);
2551 	return NULL;
2552 }
2553 
2554 static inline void set_area_direct_map(const struct vm_struct *area,
2555 				       int (*set_direct_map)(struct page *page))
2556 {
2557 	int i;
2558 
2559 	/* HUGE_VMALLOC passes small pages to set_direct_map */
2560 	for (i = 0; i < area->nr_pages; i++)
2561 		if (page_address(area->pages[i]))
2562 			set_direct_map(area->pages[i]);
2563 }
2564 
2565 /* Handle removing and resetting vm mappings related to the vm_struct. */
2566 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2567 {
2568 	unsigned long start = ULONG_MAX, end = 0;
2569 	unsigned int page_order = vm_area_page_order(area);
2570 	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2571 	int flush_dmap = 0;
2572 	int i;
2573 
2574 	remove_vm_area(area->addr);
2575 
2576 	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2577 	if (!flush_reset)
2578 		return;
2579 
2580 	/*
2581 	 * If not deallocating pages, just do the flush of the VM area and
2582 	 * return.
2583 	 */
2584 	if (!deallocate_pages) {
2585 		vm_unmap_aliases();
2586 		return;
2587 	}
2588 
2589 	/*
2590 	 * If execution gets here, flush the vm mapping and reset the direct
2591 	 * map. Find the start and end range of the direct mappings to make sure
2592 	 * the vm_unmap_aliases() flush includes the direct map.
2593 	 */
2594 	for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2595 		unsigned long addr = (unsigned long)page_address(area->pages[i]);
2596 		if (addr) {
2597 			unsigned long page_size;
2598 
2599 			page_size = PAGE_SIZE << page_order;
2600 			start = min(addr, start);
2601 			end = max(addr + page_size, end);
2602 			flush_dmap = 1;
2603 		}
2604 	}
2605 
2606 	/*
2607 	 * Set direct map to something invalid so that it won't be cached if
2608 	 * there are any accesses after the TLB flush, then flush the TLB and
2609 	 * reset the direct map permissions to the default.
2610 	 */
2611 	set_area_direct_map(area, set_direct_map_invalid_noflush);
2612 	_vm_unmap_aliases(start, end, flush_dmap);
2613 	set_area_direct_map(area, set_direct_map_default_noflush);
2614 }
2615 
2616 static void __vunmap(const void *addr, int deallocate_pages)
2617 {
2618 	struct vm_struct *area;
2619 
2620 	if (!addr)
2621 		return;
2622 
2623 	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2624 			addr))
2625 		return;
2626 
2627 	area = find_vm_area(addr);
2628 	if (unlikely(!area)) {
2629 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2630 				addr);
2631 		return;
2632 	}
2633 
2634 	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2635 	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2636 
2637 	kasan_poison_vmalloc(area->addr, get_vm_area_size(area));
2638 
2639 	if (IS_ENABLED(CONFIG_ARCH_HAS_IOREMAP_PHYS_HOOKS) &&
2640 	    area->flags & VM_IOREMAP)
2641 		iounmap_phys_range_hook(area->phys_addr, get_vm_area_size(area));
2642 
2643 	vm_remove_mappings(area, deallocate_pages);
2644 
2645 	if (deallocate_pages) {
2646 		unsigned int page_order = vm_area_page_order(area);
2647 		int i;
2648 
2649 		for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2650 			struct page *page = area->pages[i];
2651 
2652 			BUG_ON(!page);
2653 			__free_pages(page, page_order);
2654 			cond_resched();
2655 		}
2656 		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
2657 
2658 		kvfree(area->pages);
2659 	}
2660 
2661 	kfree(area);
2662 }
2663 
2664 static inline void __vfree_deferred(const void *addr)
2665 {
2666 	/*
2667 	 * Use raw_cpu_ptr() because this can be called from preemptible
2668 	 * context. Preemption is absolutely fine here, because the llist_add()
2669 	 * implementation is lockless, so it works even if we are adding to
2670 	 * another cpu's list. schedule_work() should be fine with this too.
2671 	 */
2672 	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2673 
2674 	if (llist_add((struct llist_node *)addr, &p->list))
2675 		schedule_work(&p->wq);
2676 }
2677 
2678 /**
2679  * vfree_atomic - release memory allocated by vmalloc()
2680  * @addr:	  memory base address
2681  *
2682  * This one is just like vfree() but can be called in any atomic context
2683  * except NMIs.
2684  */
2685 void vfree_atomic(const void *addr)
2686 {
2687 	BUG_ON(in_nmi());
2688 
2689 	kmemleak_free(addr);
2690 
2691 	if (!addr)
2692 		return;
2693 	__vfree_deferred(addr);
2694 }
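/*
 * Illustrative sketch (not part of the original file): vfree_atomic() lets a
 * caller that cannot sleep (here, one holding a spinlock) defer the actual
 * unmap to the vfree_deferred workqueue. The context structure and lock
 * below are hypothetical.
 */
#if 0	/* example only */
static void example_drop_buffer(struct example_ctx *ctx)
{
	spin_lock(&ctx->lock);
	/* Cannot sleep here, so plain vfree() is not allowed; defer instead. */
	vfree_atomic(ctx->buf);
	ctx->buf = NULL;
	spin_unlock(&ctx->lock);
}
#endif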
2695 
2696 static void __vfree(const void *addr)
2697 {
2698 	if (unlikely(in_interrupt()))
2699 		__vfree_deferred(addr);
2700 	else
2701 		__vunmap(addr, 1);
2702 }
2703 
2704 /**
2705  * vfree - Release memory allocated by vmalloc()
2706  * @addr:  Memory base address
2707  *
2708  * Free the virtually contiguous memory area starting at @addr, as obtained
2709  * from one of the vmalloc() family of APIs.  This will usually also free the
2710  * physical memory underlying the virtual allocation, but that memory is
2711  * reference counted, so it will not be freed until the last user goes away.
2712  *
2713  * If @addr is NULL, no operation is performed.
2714  *
2715  * Context:
2716  * May sleep if called *not* from interrupt context.
2717  * Must not be called in NMI context (strictly speaking, it could be
2718  * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2719  * conventions for vfree() arch-dependent would be a really bad idea).
2720  */
2721 void vfree(const void *addr)
2722 {
2723 	BUG_ON(in_nmi());
2724 
2725 	kmemleak_free(addr);
2726 
2727 	might_sleep_if(!in_interrupt());
2728 
2729 	if (!addr)
2730 		return;
2731 
2732 	__vfree(addr);
2733 }
2734 EXPORT_SYMBOL(vfree);
2735 
2736 /**
2737  * vunmap - release virtual mapping obtained by vmap()
2738  * @addr:   memory base address
2739  *
2740  * Free the virtually contiguous memory area starting at @addr,
2741  * which was created from the page array passed to vmap().
2742  *
2743  * Must not be called in interrupt context.
2744  */
2745 void vunmap(const void *addr)
2746 {
2747 	BUG_ON(in_interrupt());
2748 	might_sleep();
2749 	if (addr)
2750 		__vunmap(addr, 0);
2751 }
2752 EXPORT_SYMBOL(vunmap);
2753 
2754 /**
2755  * vmap - map an array of pages into virtually contiguous space
2756  * @pages: array of page pointers
2757  * @count: number of pages to map
2758  * @flags: vm_area->flags
2759  * @prot: page protection for the mapping
2760  *
2761  * Maps @count pages from @pages into contiguous kernel virtual space.
2762  * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
2763  * (which must be kmalloc or vmalloc memory) and one reference per pages in it
2764  * are transferred from the caller to vmap(), and will be freed / dropped when
2765  * vfree() is called on the return value.
2766  *
2767  * Return: the address of the area or %NULL on failure
2768  */
2769 void *vmap(struct page **pages, unsigned int count,
2770 	   unsigned long flags, pgprot_t prot)
2771 {
2772 	struct vm_struct *area;
2773 	unsigned long addr;
2774 	unsigned long size;		/* In bytes */
2775 
2776 	might_sleep();
2777 
2778 	if (count > totalram_pages())
2779 		return NULL;
2780 
2781 	size = (unsigned long)count << PAGE_SHIFT;
2782 	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2783 	if (!area)
2784 		return NULL;
2785 
2786 	addr = (unsigned long)area->addr;
2787 	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
2788 				pages, PAGE_SHIFT) < 0) {
2789 		vunmap(area->addr);
2790 		return NULL;
2791 	}
2792 
2793 	if (flags & VM_MAP_PUT_PAGES) {
2794 		area->pages = pages;
2795 		area->nr_pages = count;
2796 	}
2797 	return area->addr;
2798 }
2799 EXPORT_SYMBOL(vmap);
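/*
 * Illustrative sketch (not part of the original file): building a virtually
 * contiguous view over two scattered pages with vmap(); the mapping is torn
 * down later with vunmap(). The function name is hypothetical.
 */
#if 0	/* example only */
static void *example_map_two_pages(struct page *a, struct page *b)
{
	struct page *pages[2] = { a, b };

	/* Returns NULL on failure; release with vunmap() when done. */
	return vmap(pages, 2, VM_MAP, PAGE_KERNEL);
}
#endif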
2800 
2801 #ifdef CONFIG_VMAP_PFN
2802 struct vmap_pfn_data {
2803 	unsigned long	*pfns;
2804 	pgprot_t	prot;
2805 	unsigned int	idx;
2806 };
2807 
2808 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
2809 {
2810 	struct vmap_pfn_data *data = private;
2811 
2812 	if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx])))
2813 		return -EINVAL;
2814 	*pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot));
2815 	return 0;
2816 }
2817 
2818 /**
2819  * vmap_pfn - map an array of PFNs into virtually contiguous space
2820  * @pfns: array of PFNs
2821  * @count: number of pages to map
2822  * @prot: page protection for the mapping
2823  *
2824  * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
2825  * the start address of the mapping.
2826  */
2827 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
2828 {
2829 	struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
2830 	struct vm_struct *area;
2831 
2832 	area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
2833 			__builtin_return_address(0));
2834 	if (!area)
2835 		return NULL;
2836 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2837 			count * PAGE_SIZE, vmap_pfn_apply, &data)) {
2838 		free_vm_area(area);
2839 		return NULL;
2840 	}
2841 
2842 	flush_cache_vmap((unsigned long)area->addr,
2843 			 (unsigned long)area->addr + count * PAGE_SIZE);
2844 
2845 	return area->addr;
2846 }
2847 EXPORT_SYMBOL_GPL(vmap_pfn);
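/*
 * Illustrative sketch (not part of the original file): vmap_pfn() is meant
 * for PFNs with no struct page (e.g. device memory carved out of the
 * physical address space), which is why vmap_pfn_apply() above rejects
 * pfn_valid() PFNs. The base PFN and the use of pgprot_noncached() here
 * are hypothetical.
 */
#if 0	/* example only */
static void __iomem *example_map_device_pfns(unsigned long base_pfn)
{
	unsigned long pfns[4] = {
		base_pfn, base_pfn + 1, base_pfn + 2, base_pfn + 3,
	};

	return (void __iomem *)vmap_pfn(pfns, 4,
					pgprot_noncached(PAGE_KERNEL));
}
#endif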
2848 #endif /* CONFIG_VMAP_PFN */
2849 
2850 static inline unsigned int
2851 vm_area_alloc_pages(gfp_t gfp, int nid,
2852 		unsigned int order, unsigned int nr_pages, struct page **pages)
2853 {
2854 	unsigned int nr_allocated = 0;
2855 	struct page *page;
2856 	int i;
2857 
2858 	/*
2859 	 * For order-0 pages we make use of the bulk allocator. If
2860 	 * the page array is partly (or not at all) populated due
2861 	 * to failures, fall back to the single-page allocator,
2862 	 * which is more permissive.
2863 	 */
2864 	if (!order && nid != NUMA_NO_NODE) {
2865 		while (nr_allocated < nr_pages) {
2866 			unsigned int nr, nr_pages_request;
2867 
2868 			/*
2869 			 * The maximum allowed request is hard-coded to 100
2870 			 * pages per call, in order to prevent a long
2871 			 * preemption-off scenario in the bulk-allocator,
2872 			 * so the range is [1:100].
2873 			 */
2874 			nr_pages_request = min(100U, nr_pages - nr_allocated);
2875 
2876 			nr = alloc_pages_bulk_array_node(gfp, nid,
2877 				nr_pages_request, pages + nr_allocated);
2878 
2879 			nr_allocated += nr;
2880 			cond_resched();
2881 
2882 			/*
2883 			 * If fewer pages than requested were obtained,
2884 			 * fall back to the single-page allocator.
2885 			 */
2886 			if (nr != nr_pages_request)
2887 				break;
2888 		}
2889 	} else if (order)
2890 		/*
2891 		 * Compound pages are required for remap_vmalloc_page
2892 		 * when high-order pages are used.
2893 		 */
2894 		gfp |= __GFP_COMP;
2895 
2896 	/* High-order pages or fallback path if "bulk" fails. */
2897 
2898 	while (nr_allocated < nr_pages) {
2899 		if (nid == NUMA_NO_NODE)
2900 			page = alloc_pages(gfp, order);
2901 		else
2902 			page = alloc_pages_node(nid, gfp, order);
2903 		if (unlikely(!page))
2904 			break;
2905 
2906 		/*
2907 		 * Careful, we allocate and map page-order pages, but
2908 		 * tracking is done per PAGE_SIZE page so as to keep the
2909 		 * vm_struct APIs independent of the physical/mapped size.
2910 		 */
2911 		for (i = 0; i < (1U << order); i++)
2912 			pages[nr_allocated + i] = page + i;
2913 
2914 		cond_resched();
2915 		nr_allocated += 1U << order;
2916 	}
2917 
2918 	return nr_allocated;
2919 }
2920 
2921 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
2922 				 pgprot_t prot, unsigned int page_shift,
2923 				 int node)
2924 {
2925 	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
2926 	unsigned long addr = (unsigned long)area->addr;
2927 	unsigned long size = get_vm_area_size(area);
2928 	unsigned long array_size;
2929 	unsigned int nr_small_pages = size >> PAGE_SHIFT;
2930 	unsigned int page_order;
2931 
2932 	array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
2933 	gfp_mask |= __GFP_NOWARN;
2934 	if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
2935 		gfp_mask |= __GFP_HIGHMEM;
2936 
2937 	/* Please note that the recursion is strictly bounded. */
2938 	if (array_size > PAGE_SIZE) {
2939 		area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
2940 					area->caller);
2941 	} else {
2942 		area->pages = kmalloc_node(array_size, nested_gfp, node);
2943 	}
2944 
2945 	if (!area->pages) {
2946 		warn_alloc(gfp_mask, NULL,
2947 			"vmalloc error: size %lu, failed to allocated page array size %lu",
2948 			nr_small_pages * PAGE_SIZE, array_size);
2949 		free_vm_area(area);
2950 		return NULL;
2951 	}
2952 
2953 	set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
2954 	page_order = vm_area_page_order(area);
2955 
2956 	area->nr_pages = vm_area_alloc_pages(gfp_mask, node,
2957 		page_order, nr_small_pages, area->pages);
2958 
2959 	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2960 
2961 	/*
2962 	 * If not enough pages were obtained to accomplish an
2963 	 * allocation request, free them via __vfree() if any.
2964 	 */
2965 	if (area->nr_pages != nr_small_pages) {
2966 		/* vm_area_alloc_pages() can also fail due to a fatal signal */
2967 		if (!fatal_signal_pending(current))
2968 			warn_alloc(gfp_mask, NULL,
2969 				"vmalloc error: size %lu, page order %u, failed to allocate pages",
2970 				area->nr_pages * PAGE_SIZE, page_order);
2971 		goto fail;
2972 	}
2973 
2974 	if (vmap_pages_range(addr, addr + size, prot, area->pages,
2975 			page_shift) < 0) {
2976 		warn_alloc(gfp_mask, NULL,
2977 			"vmalloc error: size %lu, failed to map pages",
2978 			area->nr_pages * PAGE_SIZE);
2979 		goto fail;
2980 	}
2981 
2982 	return area->addr;
2983 
2984 fail:
2985 	__vfree(area->addr);
2986 	return NULL;
2987 }
2988 
2989 /**
2990  * __vmalloc_node_range - allocate virtually contiguous memory
2991  * @size:		  allocation size
2992  * @align:		  desired alignment
2993  * @start:		  vm area range start
2994  * @end:		  vm area range end
2995  * @gfp_mask:		  flags for the page level allocator
2996  * @prot:		  protection mask for the allocated pages
2997  * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
2998  * @node:		  node to use for allocation or NUMA_NO_NODE
2999  * @caller:		  caller's return address
3000  *
3001  * Allocate enough pages to cover @size from the page level
3002  * allocator with @gfp_mask flags.  Map them into contiguous
3003  * kernel virtual space, using a pagetable protection of @prot.
3004  *
3005  * Return: the address of the area or %NULL on failure
3006  */
3007 void *__vmalloc_node_range(unsigned long size, unsigned long align,
3008 			unsigned long start, unsigned long end, gfp_t gfp_mask,
3009 			pgprot_t prot, unsigned long vm_flags, int node,
3010 			const void *caller)
3011 {
3012 	struct vm_struct *area;
3013 	void *ret;
3014 	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
3015 	unsigned long real_size = size;
3016 	unsigned long real_align = align;
3017 	unsigned int shift = PAGE_SHIFT;
3018 
3019 	if (WARN_ON_ONCE(!size))
3020 		return NULL;
3021 
3022 	if ((size >> PAGE_SHIFT) > totalram_pages()) {
3023 		warn_alloc(gfp_mask, NULL,
3024 			"vmalloc error: size %lu, exceeds total pages",
3025 			real_size);
3026 		return NULL;
3027 	}
3028 
3029 	if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
3030 		unsigned long size_per_node;
3031 
3032 		/*
3033 		 * Try huge pages. Only try for PAGE_KERNEL allocations,
3034 		 * others like modules don't yet expect huge pages in
3035 		 * their allocations due to apply_to_page_range not
3036 		 * supporting them.
3037 		 */
3038 
3039 		size_per_node = size;
3040 		if (node == NUMA_NO_NODE)
3041 			size_per_node /= num_online_nodes();
3042 		if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
3043 			shift = PMD_SHIFT;
3044 		else
3045 			shift = arch_vmap_pte_supported_shift(size_per_node);
3046 
3047 		align = max(real_align, 1UL << shift);
3048 		size = ALIGN(real_size, 1UL << shift);
3049 	}
3050 
3051 again:
3052 	area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
3053 				  VM_UNINITIALIZED | vm_flags, start, end, node,
3054 				  gfp_mask, caller);
3055 	if (!area) {
3056 		warn_alloc(gfp_mask, NULL,
3057 			"vmalloc error: size %lu, vm_struct allocation failed",
3058 			real_size);
3059 		goto fail;
3060 	}
3061 
3062 	/*
3063 	 * Prepare arguments for __vmalloc_area_node() and
3064 	 * kasan_unpoison_vmalloc().
3065 	 */
3066 	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
3067 		if (kasan_hw_tags_enabled()) {
3068 			/*
3069 			 * Modify protection bits to allow tagging.
3070 			 * This must be done before mapping.
3071 			 */
3072 			prot = arch_vmap_pgprot_tagged(prot);
3073 
3074 			/*
3075 			 * Skip page_alloc poisoning and zeroing for physical
3076 			 * pages backing VM_ALLOC mapping. Memory is instead
3077 			 * poisoned and zeroed by kasan_unpoison_vmalloc().
3078 			 */
3079 			gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO;
3080 		}
3081 
3082 		/* Take note that the mapping is PAGE_KERNEL. */
3083 		kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
3084 	}
3085 
3086 	/* Allocate physical pages and map them into vmalloc space. */
3087 	ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
3088 	if (!ret)
3089 		goto fail;
3090 
3091 	/*
3092 	 * Mark the pages as accessible, now that they are mapped.
3093 	 * The condition for setting KASAN_VMALLOC_INIT should complement the
3094 	 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
3095 	 * to make sure that memory is initialized under the same conditions.
3096 	 * Tag-based KASAN modes only assign tags to normal non-executable
3097 	 * allocations, see __kasan_unpoison_vmalloc().
3098 	 */
3099 	kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
3100 	if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
3101 	    (gfp_mask & __GFP_SKIP_ZERO))
3102 		kasan_flags |= KASAN_VMALLOC_INIT;
3103 	/* KASAN_VMALLOC_PROT_NORMAL already set if required. */
3104 	area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
3105 
3106 	/*
3107 	 * The vm_struct allocated in this function has the VM_UNINITIALIZED
3108 	 * flag set, which means it is not fully initialized yet.
3109 	 * Now that it is, remove the flag here.
3110 	 */
3111 	clear_vm_uninitialized_flag(area);
3112 
3113 	size = PAGE_ALIGN(size);
3114 	if (!(vm_flags & VM_DEFER_KMEMLEAK))
3115 		kmemleak_vmalloc(area, size, gfp_mask);
3116 
3117 	return area->addr;
3118 
3119 fail:
3120 	if (shift > PAGE_SHIFT) {
3121 		shift = PAGE_SHIFT;
3122 		align = real_align;
3123 		size = real_size;
3124 		goto again;
3125 	}
3126 
3127 	return NULL;
3128 }
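/*
 * Illustrative sketch (not part of the original file): __vmalloc_node_range()
 * is what arch module loaders typically build on to place an allocation in a
 * dedicated VA window. MODULES_VADDR/MODULES_END exist only on some
 * architectures; treat this as a hypothetical example, not the actual
 * module_alloc() of any arch.
 */
#if 0	/* example only */
static void *example_module_alloc(unsigned long size)
{
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
				    GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				    NUMA_NO_NODE, __builtin_return_address(0));
}
#endif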
3129 
3130 /**
3131  * __vmalloc_node - allocate virtually contiguous memory
3132  * @size:	    allocation size
3133  * @align:	    desired alignment
3134  * @gfp_mask:	    flags for the page level allocator
3135  * @node:	    node to use for allocation or NUMA_NO_NODE
3136  * @caller:	    caller's return address
3137  *
3138  * Allocate enough pages to cover @size from the page level allocator with
3139  * @gfp_mask flags.  Map them into contiguous kernel virtual space.
3140  *
3141  * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3142  * and __GFP_NOFAIL - are not supported.
3143  *
3144  * Any use of gfp flags outside of GFP_KERNEL should be discussed
3145  * with the mm people.
3146  *
3147  * Return: pointer to the allocated memory or %NULL on error
3148  */
3149 void *__vmalloc_node(unsigned long size, unsigned long align,
3150 			    gfp_t gfp_mask, int node, const void *caller)
3151 {
3152 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
3153 				gfp_mask, PAGE_KERNEL, 0, node, caller);
3154 }
3155 /*
3156  * This is only for performance analysis of vmalloc and for stress testing.
3157  * It is required by the vmalloc test module; do not use it for anything
3158  * other than that.
3159  */
3160 #ifdef CONFIG_TEST_VMALLOC_MODULE
3161 EXPORT_SYMBOL_GPL(__vmalloc_node);
3162 #endif
3163 
3164 void *__vmalloc(unsigned long size, gfp_t gfp_mask)
3165 {
3166 	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
3167 				__builtin_return_address(0));
3168 }
3169 EXPORT_SYMBOL(__vmalloc);
3170 
3171 /**
3172  * vmalloc - allocate virtually contiguous memory
3173  * @size:    allocation size
3174  *
3175  * Allocate enough pages to cover @size from the page level
3176  * allocator and map them into contiguous kernel virtual space.
3177  *
3178  * For tight control over page level allocator and protection flags
3179  * use __vmalloc() instead.
3180  *
3181  * Return: pointer to the allocated memory or %NULL on error
3182  */
3183 void *vmalloc(unsigned long size)
3184 {
3185 	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
3186 				__builtin_return_address(0));
3187 }
3188 EXPORT_SYMBOL(vmalloc);
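/*
 * Illustrative sketch (not part of the original file): the canonical
 * vmalloc()/vfree() pairing for a large buffer that must be virtually,
 * but not physically, contiguous. Names and sizes are hypothetical.
 */
#if 0	/* example only */
static int example_use_big_table(void)
{
	size_t sz = 4UL << 20;		/* 4 MiB */
	u32 *table = vmalloc(sz);

	if (!table)
		return -ENOMEM;
	table[0] = 1;			/* ... use the table ... */
	vfree(table);
	return 0;
}
#endif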
3189 
3190 /**
3191  * vmalloc_no_huge - allocate virtually contiguous memory using small pages
3192  * @size:    allocation size
3193  *
3194  * Allocate enough non-huge pages to cover @size from the page level
3195  * allocator and map them into contiguous kernel virtual space.
3196  *
3197  * Return: pointer to the allocated memory or %NULL on error
3198  */
3199 void *vmalloc_no_huge(unsigned long size)
3200 {
3201 	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
3202 				    GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP,
3203 				    NUMA_NO_NODE, __builtin_return_address(0));
3204 }
3205 EXPORT_SYMBOL(vmalloc_no_huge);
3206 
3207 /**
3208  * vzalloc - allocate virtually contiguous memory with zero fill
3209  * @size:    allocation size
3210  *
3211  * Allocate enough pages to cover @size from the page level
3212  * allocator and map them into contiguous kernel virtual space.
3213  * The memory allocated is set to zero.
3214  *
3215  * For tight control over page level allocator and protection flags
3216  * use __vmalloc() instead.
3217  *
3218  * Return: pointer to the allocated memory or %NULL on error
3219  */
3220 void *vzalloc(unsigned long size)
3221 {
3222 	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
3223 				__builtin_return_address(0));
3224 }
3225 EXPORT_SYMBOL(vzalloc);
3226 
3227 /**
3228  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
3229  * @size: allocation size
3230  *
3231  * The resulting memory area is zeroed so it can be mapped to userspace
3232  * without leaking data.
3233  *
3234  * Return: pointer to the allocated memory or %NULL on error
3235  */
3236 void *vmalloc_user(unsigned long size)
3237 {
3238 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3239 				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
3240 				    VM_USERMAP, NUMA_NO_NODE,
3241 				    __builtin_return_address(0));
3242 }
3243 EXPORT_SYMBOL(vmalloc_user);
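/*
 * Illustrative sketch (not part of the original file): vmalloc_user() pairs
 * with remap_vmalloc_range() in a driver's mmap handler; the VM_USERMAP flag
 * set above is what makes the remap legal. The driver context below is
 * hypothetical.
 */
#if 0	/* example only */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	void *buf = file->private_data;	/* allocated with vmalloc_user() */

	return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
}
#endif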
3244 
3245 /**
3246  * vmalloc_node - allocate memory on a specific node
3247  * @size:	  allocation size
3248  * @node:	  numa node
3249  *
3250  * Allocate enough pages to cover @size from the page level
3251  * allocator and map them into contiguous kernel virtual space.
3252  *
3253  * For tight control over page level allocator and protection flags
3254  * use __vmalloc() instead.
3255  *
3256  * Return: pointer to the allocated memory or %NULL on error
3257  */
3258 void *vmalloc_node(unsigned long size, int node)
3259 {
3260 	return __vmalloc_node(size, 1, GFP_KERNEL, node,
3261 			__builtin_return_address(0));
3262 }
3263 EXPORT_SYMBOL(vmalloc_node);
3264 
3265 /**
3266  * vzalloc_node - allocate memory on a specific node with zero fill
3267  * @size:	allocation size
3268  * @node:	numa node
3269  *
3270  * Allocate enough pages to cover @size from the page level
3271  * allocator and map them into contiguous kernel virtual space.
3272  * The memory allocated is set to zero.
3273  *
3274  * Return: pointer to the allocated memory or %NULL on error
3275  */
3276 void *vzalloc_node(unsigned long size, int node)
3277 {
3278 	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
3279 				__builtin_return_address(0));
3280 }
3281 EXPORT_SYMBOL(vzalloc_node);
3282 
3283 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
3284 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3285 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
3286 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
3287 #else
3288 /*
3289  * 64-bit systems should always have either DMA or DMA32 zones. For others,
3290  * GFP_DMA32 should do the right thing and use the normal zone.
3291  */
3292 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3293 #endif
3294 
3295 /**
3296  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
3297  * @size:	allocation size
3298  *
3299  * Allocate enough 32bit physically addressable pages to cover @size from the
3300  * page level allocator and map them into contiguous kernel virtual space.
3301  *
3302  * Return: pointer to the allocated memory or %NULL on error
3303  */
3304 void *vmalloc_32(unsigned long size)
3305 {
3306 	return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
3307 			__builtin_return_address(0));
3308 }
3309 EXPORT_SYMBOL(vmalloc_32);
3310 
3311 /**
3312  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
3313  * @size:	     allocation size
3314  *
3315  * The resulting memory area is 32bit addressable and zeroed so it can be
3316  * mapped to userspace without leaking data.
3317  *
3318  * Return: pointer to the allocated memory or %NULL on error
3319  */
3320 void *vmalloc_32_user(unsigned long size)
3321 {
3322 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3323 				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
3324 				    VM_USERMAP, NUMA_NO_NODE,
3325 				    __builtin_return_address(0));
3326 }
3327 EXPORT_SYMBOL(vmalloc_32_user);
3328 
3329 /*
3330  * Small helper routine: copy contents from addr to buf.
3331  * If a page is not present, fill with zeroes.
3332  */
3333 
3334 static int aligned_vread(char *buf, char *addr, unsigned long count)
3335 {
3336 	struct page *p;
3337 	int copied = 0;
3338 
3339 	while (count) {
3340 		unsigned long offset, length;
3341 
3342 		offset = offset_in_page(addr);
3343 		length = PAGE_SIZE - offset;
3344 		if (length > count)
3345 			length = count;
3346 		p = vmalloc_to_page(addr);
3347 		/*
3348 		 * Safe access to this _mapped_ area would require a
3349 		 * lock, but taking one here would add the locking
3350 		 * overhead to every vmalloc()/vfree() call for the sake
3351 		 * of this rarely used _debug_ interface. Instead, we use
3352 		 * kmap_atomic() and accept a small overhead here.
3353 		 */
3354 		if (p) {
3355 			/* We can expect USER0 is not used -- see vread() */
3356 			void *map = kmap_atomic(p);
3357 			memcpy(buf, map + offset, length);
3358 			kunmap_atomic(map);
3359 		} else
3360 			memset(buf, 0, length);
3361 
3362 		addr += length;
3363 		buf += length;
3364 		copied += length;
3365 		count -= length;
3366 	}
3367 	return copied;
3368 }
3369 
3370 /**
3371  * vread() - read vmalloc area in a safe way.
3372  * @buf:     buffer for reading data
3373  * @addr:    vm address.
3374  * @count:   number of bytes to be read.
3375  *
3376  * This function checks that addr is a valid vmalloc'ed area, and
3377  * copies data from that area to the given buffer. If the given memory range
3378  * of [addr...addr+count) includes some valid address, data is copied to
3379  * the proper area of @buf. If there are memory holes, they'll be zero-filled.
3380  * An IOREMAP area is treated as a memory hole and no copy is done.
3381  *
3382  * If [addr...addr+count) does not intersect any live vm_struct
3383  * area, 0 is returned. @buf should be a kernel buffer.
3384  *
3385  * Note: In normal operation, vread() is never necessary, because the caller
3386  * knows the vmalloc() area is valid and can use memcpy().
3387  * This is for routines which have to access a vmalloc area without
3388  * any prior information, such as /proc/kcore.
3389  *
3390  * Return: number of bytes for which addr and buf should be increased
3391  * (same number as @count) or %0 if [addr...addr+count) doesn't
3392  * include any intersection with valid vmalloc area
3393  */
3394 long vread(char *buf, char *addr, unsigned long count)
3395 {
3396 	struct vmap_area *va;
3397 	struct vm_struct *vm;
3398 	char *vaddr, *buf_start = buf;
3399 	unsigned long buflen = count;
3400 	unsigned long n;
3401 
3402 	addr = kasan_reset_tag(addr);
3403 
3404 	/* Don't allow overflow */
3405 	if ((unsigned long) addr + count < count)
3406 		count = -(unsigned long) addr;
3407 
3408 	spin_lock(&vmap_area_lock);
3409 	va = find_vmap_area_exceed_addr((unsigned long)addr);
3410 	if (!va)
3411 		goto finished;
3412 
3413 	/* no intersects with alive vmap_area */
3414 	if ((unsigned long)addr + count <= va->va_start)
3415 		goto finished;
3416 
3417 	list_for_each_entry_from(va, &vmap_area_list, list) {
3418 		if (!count)
3419 			break;
3420 
3421 		if (!va->vm)
3422 			continue;
3423 
3424 		vm = va->vm;
3425 		vaddr = (char *) vm->addr;
3426 		if (addr >= vaddr + get_vm_area_size(vm))
3427 			continue;
3428 		while (addr < vaddr) {
3429 			if (count == 0)
3430 				goto finished;
3431 			*buf = '\0';
3432 			buf++;
3433 			addr++;
3434 			count--;
3435 		}
3436 		n = vaddr + get_vm_area_size(vm) - addr;
3437 		if (n > count)
3438 			n = count;
3439 		if (!(vm->flags & VM_IOREMAP))
3440 			aligned_vread(buf, addr, n);
3441 		else /* IOREMAP area is treated as memory hole */
3442 			memset(buf, 0, n);
3443 		buf += n;
3444 		addr += n;
3445 		count -= n;
3446 	}
3447 finished:
3448 	spin_unlock(&vmap_area_lock);
3449 
3450 	if (buf == buf_start)
3451 		return 0;
3452 	/* zero-fill memory holes */
3453 	if (buf != buf_start + buflen)
3454 		memset(buf, 0, buflen - (buf - buf_start));
3455 
3456 	return buflen;
3457 }
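
/*
 * A minimal illustrative sketch of a vread() caller, in the style of
 * /proc/kcore. The function name and parameters below are hypothetical.
 */
static long example_read_vmalloc(char *kbuf, void *vm_addr, unsigned long len)
{
	/*
	 * vread() tolerates memory holes and dead areas, so no prior
	 * validation of vm_addr is needed; a return of 0 means the
	 * range intersects no live vmalloc area.
	 */
	return vread(kbuf, (char *)vm_addr, len);
}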

/**
 * remap_vmalloc_range_partial - map vmalloc pages to userspace
 * @vma:		vma to cover
 * @uaddr:		target user address to start at
 * @kaddr:		virtual address of vmalloc kernel memory
 * @pgoff:		offset from @kaddr to start at
 * @size:		size of map area
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that @kaddr is a valid vmalloc'ed area,
 * and that it is big enough to cover the range starting at
 * @uaddr in @vma. It returns failure if those criteria aren't
 * met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
				void *kaddr, unsigned long pgoff,
				unsigned long size)
{
	struct vm_struct *area;
	unsigned long off;
	unsigned long end_index;

	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
		return -EINVAL;

	size = PAGE_ALIGN(size);

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
		return -EINVAL;

	area = find_vm_area(kaddr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
		return -EINVAL;

	if (check_add_overflow(size, off, &end_index) ||
	    end_index > get_vm_area_size(area))
		return -EINVAL;
	kaddr += off;

	do {
		struct page *page = vmalloc_to_page(kaddr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		kaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size > 0);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma:		vma to cover (map full range of vma)
 * @addr:		vmalloc memory
 * @pgoff:		number of pages into addr before first page to map
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. It returns failure if
 * those criteria aren't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   addr, pgoff,
					   vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);
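
/*
 * A minimal illustrative sketch of the typical remap_vmalloc_range()
 * user: a driver's mmap handler exposing a vmalloc_user() buffer. The
 * function name and the private_data convention are hypothetical.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	void *buf = file->private_data;	/* assumed: from vmalloc_user() */

	/* Map the entire buffer over the vma, starting at its first page. */
	return remap_vmalloc_range(vma, buf, 0);
}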

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;

	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
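
/*
 * A minimal illustrative sketch pairing free_vm_area() with a
 * get_vm_area() allocation; the function name is hypothetical.
 */
static void example_vm_area_cycle(void)
{
	struct vm_struct *area = get_vm_area(PAGE_SIZE, VM_IOREMAP);

	if (!area)
		return;

	/* ... map pages or I/O memory into the range at area->addr ... */

	free_vm_area(area);	/* removes the area and kfrees the vm_struct */
}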

#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return rb_entry_safe(n, struct vmap_area, rb_node);
}

/**
 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
 * @addr: target address
 *
 * Returns: the vmap_area if one is found. If there is no such area,
 *   the closest lower (reverse order) vmap_area is returned instead,
 *   i.e. one with va->va_start < addr && va->va_end < addr, or NULL
 *   if there are no areas before @addr.
 */
static struct vmap_area *
pvm_find_va_enclose_addr(unsigned long addr)
{
	struct vmap_area *va, *tmp;
	struct rb_node *n;

	n = free_vmap_area_root.rb_node;
	va = NULL;

	while (n) {
		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_start <= addr) {
			va = tmp;
			if (tmp->va_end >= addr)
				break;

			n = n->rb_right;
		} else {
			n = n->rb_left;
		}
	}

	return va;
}

/**
 * pvm_determine_end_from_reverse - find the highest aligned address
 * of a free block below VMALLOC_END
 * @va:
 *   in - the VA we start the search from (reverse order);
 *   out - the VA with the highest aligned end address.
 * @align: alignment for the required highest address
 *
 * Returns: determined end address within vmap_area
 */
static unsigned long
pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
{
	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (likely(*va)) {
		list_for_each_entry_from_reverse((*va),
				&free_vmap_area_list, list) {
			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
			if ((*va)->va_start < addr)
				return addr;
		}
	}

	return 0;
}

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas.  This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
 * be scattered pretty far, the distance between two areas easily going
 * up to gigabytes.  To avoid interacting with regular vmallocs, these
 * areas are allocated from the top.
 *
 * Despite its complicated look, this allocator is rather simple.  It
 * does everything top-down and scans free blocks from the end looking
 * for a matching base.  While scanning, if any of the areas do not fit,
 * the base address is pulled down to fit the area.  Scanning is
 * repeated until all the areas fit, and then all necessary data
 * structures are inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *va;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, size, end, last_end, orig_start, orig_end;
	bool purged = false;
	enum fit_type type;

	/* verify parameters and allocate data structures */
	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = area + 1; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			BUG_ON(start2 < end && start < end2);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
	if (!vas || !vms)
		goto err_free2;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&free_vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	va = pvm_find_va_enclose_addr(vmalloc_end);
	base = pvm_determine_end_from_reverse(&va, align) - end;

	while (true) {
		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end)
			goto overflow;

		/*
		 * Fitting base has not been found.
		 */
		if (va == NULL)
			goto overflow;

		/*
		 * If required width exceeds current VA block, move
		 * base downwards and then recheck.
		 */
		if (base + end > va->va_end) {
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If this VA does not fit, move base downwards and recheck.
		 */
		if (base + start < va->va_start) {
			va = node_to_va(rb_prev(&va->rb_node));
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one.  If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;

		start = offsets[area];
		end = start + sizes[area];
		va = pvm_find_va_enclose_addr(base + end);
	}

	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		int ret;

		start = base + offsets[area];
		size = sizes[area];

		va = pvm_find_va_enclose_addr(start);
		if (WARN_ON_ONCE(va == NULL))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		type = classify_va_fit_type(va, start, size);
		if (WARN_ON_ONCE(type == NOTHING_FIT))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		ret = adjust_va_to_fit_type(va, start, size, type);
		if (unlikely(ret))
			goto recovery;

		/* Allocated area. */
		va = vas[area];
		va->va_start = start;
		va->va_end = start + size;
	}

	spin_unlock(&free_vmap_area_lock);

	/* populate the kasan shadow space */
	for (area = 0; area < nr_vms; area++) {
		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
			goto err_free_shadow;
	}

	/* insert all vm's */
	spin_lock(&vmap_area_lock);
	for (area = 0; area < nr_vms; area++) {
		insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);

		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
				 pcpu_get_vm_areas);
	}
	spin_unlock(&vmap_area_lock);

	/*
	 * Mark allocated areas as accessible. Do it now as a best-effort
	 * approach, as they can be mapped outside of vmalloc code.
	 * With hardware tag-based KASAN, marking is skipped for
	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
	 */
	for (area = 0; area < nr_vms; area++)
		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
				vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);

	kfree(vas);
	return vms;

recovery:
	/*
	 * Remove previously allocated areas. There is no
	 * need to remove these areas from the busy tree,
	 * because they are inserted only on the final step
	 * and only when pcpu_get_vm_areas() succeeds.
	 */
	while (area--) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
				&free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
		vas[area] = NULL;
	}

overflow:
	spin_unlock(&free_vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = true;

		/* Before "retry", check if we can recover. */
		for (area = 0; area < nr_vms; area++) {
			if (vas[area])
				continue;

			vas[area] = kmem_cache_zalloc(
				vmap_area_cachep, GFP_KERNEL);
			if (!vas[area])
				goto err_free;
		}

		goto retry;
	}

err_free:
	for (area = 0; area < nr_vms; area++) {
		if (vas[area])
			kmem_cache_free(vmap_area_cachep, vas[area]);

		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;

err_free_shadow:
	spin_lock(&free_vmap_area_lock);
	/*
	 * We release all the vmalloc shadows, even the ones for regions that
	 * hadn't been successfully added. This relies on kasan_release_vmalloc
	 * being able to tolerate this case.
	 */
	for (area = 0; area < nr_vms; area++) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
				&free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
		vas[area] = NULL;
		kfree(vms[area]);
	}
	spin_unlock(&free_vmap_area_lock);
	kfree(vas);
	kfree(vms);
	return NULL;
}

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
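
/*
 * A minimal illustrative sketch of the pcpu_get_vm_areas() /
 * pcpu_free_vm_areas() pairing, as the percpu first-chunk code uses
 * it; the two-group layout below is hypothetical.
 */
static void example_pcpu_vm_areas(void)
{
	/* Two congruent, non-overlapping areas, both PMD_SIZE aligned. */
	const unsigned long offsets[] = { 0, PMD_SIZE };
	const size_t sizes[] = { PMD_SIZE, PMD_SIZE };
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(offsets, sizes, 2, PMD_SIZE);
	if (!vms)
		return;

	/* vms[0]->addr and vms[1]->addr keep the requested offsets. */
	pcpu_free_vm_areas(vms, 2);
}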
#endif	/* CONFIG_SMP */

#ifdef CONFIG_PRINTK
bool vmalloc_dump_obj(void *object)
{
	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
	const void *caller;
	struct vm_struct *vm;
	struct vmap_area *va;
	unsigned long addr;
	unsigned int nr_pages;

	if (!spin_trylock(&vmap_area_lock))
		return false;
	va = __find_vmap_area((unsigned long)objp);
	if (!va) {
		spin_unlock(&vmap_area_lock);
		return false;
	}

	vm = va->vm;
	if (!vm) {
		spin_unlock(&vmap_area_lock);
		return false;
	}
	addr = (unsigned long)vm->addr;
	caller = vm->caller;
	nr_pages = vm->nr_pages;
	spin_unlock(&vmap_area_lock);
	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
		nr_pages, addr, caller);
	return true;
}
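
/*
 * Illustrative output built from the pr_cont() format string above;
 * the page count, address and caller are hypothetical:
 *
 *   8-page vmalloc region starting at 0xffffa60740000000 allocated at
 *   n_tty_open+0x18/0xa0
 */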
#endif

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_purge_lock)
	__acquires(&vmap_area_lock)
{
	mutex_lock(&vmap_purge_lock);
	spin_lock(&vmap_area_lock);

	return seq_list_start(&vmap_area_list, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &vmap_area_list, pos);
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
	__releases(&vmap_purge_lock)
{
	spin_unlock(&vmap_area_lock);
	mutex_unlock(&vmap_purge_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		if (v->flags & VM_UNINITIALIZED)
			return;
		/* Pairs with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static void show_purge_info(struct seq_file *m)
{
	struct vmap_area *va;

	spin_lock(&purge_vmap_area_lock);
	list_for_each_entry(va, &purge_vmap_area_list, list) {
		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);
	}
	spin_unlock(&purge_vmap_area_lock);
}
static int s_show(struct seq_file *m, void *p)
{
	struct vmap_area *va;
	struct vm_struct *v;

	va = list_entry(p, struct vmap_area, list);

	/*
	 * s_show can race with remove_vm_area(): a vmap area whose ->vm
	 * is not set is either being torn down or belongs to the
	 * vm_map_ram allocator.
	 */
	if (!va->vm) {
		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);

		return 0;
	}

	v = va->vm;

	seq_printf(m, "0x%pK-0x%pK %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%pa", &v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_puts(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_puts(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_puts(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_puts(m, " user");

	if (v->flags & VM_DMA_COHERENT)
		seq_puts(m, " dma-coherent");

	if (is_vmalloc_addr(v->pages))
		seq_puts(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');

	/*
	 * As a final step, dump "unpurged" areas.
	 */
	if (list_is_last(&va->list, &vmap_area_list))
		show_purge_info(m);

	return 0;
}
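
/*
 * Illustrative /proc/vmallocinfo output assembled from the format
 * strings above; addresses, sizes and callers are hypothetical (%pK
 * may print hashed or zeroed addresses depending on kptr_restrict):
 *
 *   0xffffa60740003000-0xffffa60740005000    8192 n_tty_open+0x18/0xa0 pages=1 vmalloc
 *   0xffffa60740005000-0xffffa60740007000    8192 vm_map_ram
 */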

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int __init proc_vmalloc_init(void)
{
	if (IS_ENABLED(CONFIG_NUMA))
		proc_create_seq_private("vmallocinfo", 0400, NULL,
				&vmalloc_op,
				nr_node_ids * sizeof(unsigned int), NULL);
	else
		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
	return 0;
}
module_init(proc_vmalloc_init);

#endif