1 /*
2 * linux/mm/vmalloc.c
3 *
4 * Copyright (C) 1993 Linus Torvalds
5 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
6 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
7 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
8 * Numa awareness, Christoph Lameter, SGI, June 2005
9 */
10
11 #include <linux/vmalloc.h>
12 #include <linux/mm.h>
13 #include <linux/module.h>
14 #include <linux/highmem.h>
15 #include <linux/sched.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/interrupt.h>
19 #include <linux/proc_fs.h>
20 #include <linux/seq_file.h>
21 #include <linux/debugobjects.h>
22 #include <linux/kallsyms.h>
23 #include <linux/list.h>
24 #include <linux/rbtree.h>
25 #include <linux/radix-tree.h>
26 #include <linux/rcupdate.h>
27 #include <linux/pfn.h>
28 #include <linux/kmemleak.h>
29 #include <linux/atomic.h>
30 #include <linux/compiler.h>
31 #include <linux/llist.h>
32
33 #include <asm/uaccess.h>
34 #include <asm/tlbflush.h>
35 #include <asm/shmparam.h>
36
37 struct vfree_deferred {
38 struct llist_head list;
39 struct work_struct wq;
40 };
41 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
42
43 static void __vunmap(const void *, int);
44
45 static void free_work(struct work_struct *w)
46 {
47 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
48 struct llist_node *llnode = llist_del_all(&p->list);
49 while (llnode) {
50 void *p = llnode;
51 llnode = llist_next(llnode);
52 __vunmap(p, 1);
53 }
54 }
55
56 /*** Page table manipulation functions ***/
57
58 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
59 {
60 pte_t *pte;
61
62 pte = pte_offset_kernel(pmd, addr);
63 do {
64 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
65 WARN_ON(!pte_none(ptent) && !pte_present(ptent));
66 } while (pte++, addr += PAGE_SIZE, addr != end);
67 }
68
69 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
70 {
71 pmd_t *pmd;
72 unsigned long next;
73
74 pmd = pmd_offset(pud, addr);
75 do {
76 next = pmd_addr_end(addr, end);
77 if (pmd_none_or_clear_bad(pmd))
78 continue;
79 vunmap_pte_range(pmd, addr, next);
80 } while (pmd++, addr = next, addr != end);
81 }
82
83 static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
84 {
85 pud_t *pud;
86 unsigned long next;
87
88 pud = pud_offset(pgd, addr);
89 do {
90 next = pud_addr_end(addr, end);
91 if (pud_none_or_clear_bad(pud))
92 continue;
93 vunmap_pmd_range(pud, addr, next);
94 } while (pud++, addr = next, addr != end);
95 }
96
97 static void vunmap_page_range(unsigned long addr, unsigned long end)
98 {
99 pgd_t *pgd;
100 unsigned long next;
101
102 BUG_ON(addr >= end);
103 pgd = pgd_offset_k(addr);
104 do {
105 next = pgd_addr_end(addr, end);
106 if (pgd_none_or_clear_bad(pgd))
107 continue;
108 vunmap_pud_range(pgd, addr, next);
109 } while (pgd++, addr = next, addr != end);
110 }
111
112 static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
113 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
114 {
115 pte_t *pte;
116
117 /*
118 * nr is a running index into the array which helps higher level
119 * callers keep track of where we're up to.
120 */
121
122 pte = pte_alloc_kernel(pmd, addr);
123 if (!pte)
124 return -ENOMEM;
125 do {
126 struct page *page = pages[*nr];
127
128 if (WARN_ON(!pte_none(*pte)))
129 return -EBUSY;
130 if (WARN_ON(!page))
131 return -ENOMEM;
132 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
133 (*nr)++;
134 } while (pte++, addr += PAGE_SIZE, addr != end);
135 return 0;
136 }
137
138 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
139 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
140 {
141 pmd_t *pmd;
142 unsigned long next;
143
144 pmd = pmd_alloc(&init_mm, pud, addr);
145 if (!pmd)
146 return -ENOMEM;
147 do {
148 next = pmd_addr_end(addr, end);
149 if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
150 return -ENOMEM;
151 } while (pmd++, addr = next, addr != end);
152 return 0;
153 }
154
155 static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
156 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
157 {
158 pud_t *pud;
159 unsigned long next;
160
161 pud = pud_alloc(&init_mm, pgd, addr);
162 if (!pud)
163 return -ENOMEM;
164 do {
165 next = pud_addr_end(addr, end);
166 if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
167 return -ENOMEM;
168 } while (pud++, addr = next, addr != end);
169 return 0;
170 }
171
172 /*
173 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
174 * will have pfns corresponding to the "pages" array.
175 *
176 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
177 */
178 static int vmap_page_range_noflush(unsigned long start, unsigned long end,
179 pgprot_t prot, struct page **pages)
180 {
181 pgd_t *pgd;
182 unsigned long next;
183 unsigned long addr = start;
184 int err = 0;
185 int nr = 0;
186
187 BUG_ON(addr >= end);
188 pgd = pgd_offset_k(addr);
189 do {
190 next = pgd_addr_end(addr, end);
191 err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
192 if (err)
193 return err;
194 } while (pgd++, addr = next, addr != end);
195
196 return nr;
197 }
198
199 static int vmap_page_range(unsigned long start, unsigned long end,
200 pgprot_t prot, struct page **pages)
201 {
202 int ret;
203
204 ret = vmap_page_range_noflush(start, end, prot, pages);
205 flush_cache_vmap(start, end);
206 return ret;
207 }
208
209 int is_vmalloc_or_module_addr(const void *x)
210 {
211 /*
212 * ARM, x86-64 and sparc64 put modules in a special place,
213 * and fall back on vmalloc() if that fails. Others
214 * just put it in the vmalloc space.
215 */
216 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
217 unsigned long addr = (unsigned long)x;
218 if (addr >= MODULES_VADDR && addr < MODULES_END)
219 return 1;
220 #endif
221 return is_vmalloc_addr(x);
222 }
223
224 /*
225 * Walk a vmap address to the struct page it maps.
226 */
227 struct page *vmalloc_to_page(const void *vmalloc_addr)
228 {
229 unsigned long addr = (unsigned long) vmalloc_addr;
230 struct page *page = NULL;
231 pgd_t *pgd = pgd_offset_k(addr);
232
233 /*
234 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
235 * architectures that do not vmalloc module space
236 */
237 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
238
239 if (!pgd_none(*pgd)) {
240 pud_t *pud = pud_offset(pgd, addr);
241 if (!pud_none(*pud)) {
242 pmd_t *pmd = pmd_offset(pud, addr);
243 if (!pmd_none(*pmd)) {
244 pte_t *ptep, pte;
245
246 ptep = pte_offset_map(pmd, addr);
247 pte = *ptep;
248 if (pte_present(pte))
249 page = pte_page(pte);
250 pte_unmap(ptep);
251 }
252 }
253 }
254 return page;
255 }
256 EXPORT_SYMBOL(vmalloc_to_page);
257
258 /*
259 * Map a vmalloc()-space virtual address to the physical page frame number.
260 */
261 unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
262 {
263 return page_to_pfn(vmalloc_to_page(vmalloc_addr));
264 }
265 EXPORT_SYMBOL(vmalloc_to_pfn);
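
/*
 * For example, a caller that needs the backing pages of a vmalloc()ed
 * buffer (say, to build a scatterlist) can walk it page by page. A
 * rough sketch, where "buf" and "size" are the caller's own buffer
 * pointer and length:
 *
 *	char *p;
 *
 *	for (p = buf; p < buf + size; p += PAGE_SIZE) {
 *		struct page *page = vmalloc_to_page(p);
 *		unsigned long pfn = vmalloc_to_pfn(p);
 *		... use page/pfn ...
 *	}
 */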
266
267
268 /*** Global kva allocator ***/
269
270 #define VM_LAZY_FREE 0x01
271 #define VM_LAZY_FREEING 0x02
272 #define VM_VM_AREA 0x04
273
274 static DEFINE_SPINLOCK(vmap_area_lock);
275 /* Export for kexec only */
276 LIST_HEAD(vmap_area_list);
277 static struct rb_root vmap_area_root = RB_ROOT;
278
279 /* The vmap cache globals are protected by vmap_area_lock */
280 static struct rb_node *free_vmap_cache;
281 static unsigned long cached_hole_size;
282 static unsigned long cached_vstart;
283 static unsigned long cached_align;
284
285 static unsigned long vmap_area_pcpu_hole;
286
287 static struct vmap_area *__find_vmap_area(unsigned long addr)
288 {
289 struct rb_node *n = vmap_area_root.rb_node;
290
291 while (n) {
292 struct vmap_area *va;
293
294 va = rb_entry(n, struct vmap_area, rb_node);
295 if (addr < va->va_start)
296 n = n->rb_left;
297 else if (addr >= va->va_end)
298 n = n->rb_right;
299 else
300 return va;
301 }
302
303 return NULL;
304 }
305
306 static void __insert_vmap_area(struct vmap_area *va)
307 {
308 struct rb_node **p = &vmap_area_root.rb_node;
309 struct rb_node *parent = NULL;
310 struct rb_node *tmp;
311
312 while (*p) {
313 struct vmap_area *tmp_va;
314
315 parent = *p;
316 tmp_va = rb_entry(parent, struct vmap_area, rb_node);
317 if (va->va_start < tmp_va->va_end)
318 p = &(*p)->rb_left;
319 else if (va->va_end > tmp_va->va_start)
320 p = &(*p)->rb_right;
321 else
322 BUG();
323 }
324
325 rb_link_node(&va->rb_node, parent, p);
326 rb_insert_color(&va->rb_node, &vmap_area_root);
327
328 /* address-sort this list */
329 tmp = rb_prev(&va->rb_node);
330 if (tmp) {
331 struct vmap_area *prev;
332 prev = rb_entry(tmp, struct vmap_area, rb_node);
333 list_add_rcu(&va->list, &prev->list);
334 } else
335 list_add_rcu(&va->list, &vmap_area_list);
336 }
337
338 static void purge_vmap_area_lazy(void);
339
340 /*
341 * Allocate a region of KVA of the specified size and alignment, within the
342 * vstart and vend.
343 */
344 static struct vmap_area *alloc_vmap_area(unsigned long size,
345 unsigned long align,
346 unsigned long vstart, unsigned long vend,
347 int node, gfp_t gfp_mask)
348 {
349 struct vmap_area *va;
350 struct rb_node *n;
351 unsigned long addr;
352 int purged = 0;
353 struct vmap_area *first;
354
355 BUG_ON(!size);
356 BUG_ON(size & ~PAGE_MASK);
357 BUG_ON(!is_power_of_2(align));
358
359 va = kmalloc_node(sizeof(struct vmap_area),
360 gfp_mask & GFP_RECLAIM_MASK, node);
361 if (unlikely(!va))
362 return ERR_PTR(-ENOMEM);
363
364 /*
365 * Only scan the relevant parts containing pointers to other objects
366 * to avoid false negatives.
367 */
368 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
369
370 retry:
371 spin_lock(&vmap_area_lock);
372 /*
373 * Invalidate cache if we have more permissive parameters.
374 * cached_hole_size notes the largest hole noticed _below_
375 * the vmap_area cached in free_vmap_cache: if size fits
376 * into that hole, we want to scan from vstart to reuse
377 * the hole instead of allocating above free_vmap_cache.
378 * Note that __free_vmap_area may update free_vmap_cache
379 * without updating cached_hole_size or cached_align.
380 */
381 if (!free_vmap_cache ||
382 size < cached_hole_size ||
383 vstart < cached_vstart ||
384 align < cached_align) {
385 nocache:
386 cached_hole_size = 0;
387 free_vmap_cache = NULL;
388 }
389 /* record if we encounter less permissive parameters */
390 cached_vstart = vstart;
391 cached_align = align;
392
393 /* find starting point for our search */
394 if (free_vmap_cache) {
395 first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
396 addr = ALIGN(first->va_end, align);
397 if (addr < vstart)
398 goto nocache;
399 if (addr + size < addr)
400 goto overflow;
401
402 } else {
403 addr = ALIGN(vstart, align);
404 if (addr + size < addr)
405 goto overflow;
406
407 n = vmap_area_root.rb_node;
408 first = NULL;
409
410 while (n) {
411 struct vmap_area *tmp;
412 tmp = rb_entry(n, struct vmap_area, rb_node);
413 if (tmp->va_end >= addr) {
414 first = tmp;
415 if (tmp->va_start <= addr)
416 break;
417 n = n->rb_left;
418 } else
419 n = n->rb_right;
420 }
421
422 if (!first)
423 goto found;
424 }
425
426 /* from the starting point, walk areas until a suitable hole is found */
427 while (addr + size > first->va_start && addr + size <= vend) {
428 if (addr + cached_hole_size < first->va_start)
429 cached_hole_size = first->va_start - addr;
430 addr = ALIGN(first->va_end, align);
431 if (addr + size < addr)
432 goto overflow;
433
434 if (list_is_last(&first->list, &vmap_area_list))
435 goto found;
436
437 first = list_entry(first->list.next,
438 struct vmap_area, list);
439 }
440
441 found:
442 if (addr + size > vend)
443 goto overflow;
444
445 va->va_start = addr;
446 va->va_end = addr + size;
447 va->flags = 0;
448 __insert_vmap_area(va);
449 free_vmap_cache = &va->rb_node;
450 spin_unlock(&vmap_area_lock);
451
452 BUG_ON(va->va_start & (align-1));
453 BUG_ON(va->va_start < vstart);
454 BUG_ON(va->va_end > vend);
455
456 return va;
457
458 overflow:
459 spin_unlock(&vmap_area_lock);
460 if (!purged) {
461 purge_vmap_area_lazy();
462 purged = 1;
463 goto retry;
464 }
465 if (printk_ratelimit())
466 printk(KERN_WARNING
467 "vmap allocation for size %lu failed: "
468 "use vmalloc=<size> to increase size.\n", size);
469 kfree(va);
470 return ERR_PTR(-EBUSY);
471 }
472
473 static void __free_vmap_area(struct vmap_area *va)
474 {
475 BUG_ON(RB_EMPTY_NODE(&va->rb_node));
476
477 if (free_vmap_cache) {
478 if (va->va_end < cached_vstart) {
479 free_vmap_cache = NULL;
480 } else {
481 struct vmap_area *cache;
482 cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
483 if (va->va_start <= cache->va_start) {
484 free_vmap_cache = rb_prev(&va->rb_node);
485 /*
486 * We don't try to update cached_hole_size or
487 * cached_align, but it won't go very wrong.
488 */
489 }
490 }
491 }
492 rb_erase(&va->rb_node, &vmap_area_root);
493 RB_CLEAR_NODE(&va->rb_node);
494 list_del_rcu(&va->list);
495
496 /*
497 * Track the highest possible candidate for pcpu area
498 * allocation. Areas outside of vmalloc area can be returned
499 * here too, consider only end addresses which fall inside
500 * vmalloc area proper.
501 */
502 if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
503 vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);
504
505 kfree_rcu(va, rcu_head);
506 }
507
508 /*
509 * Free a region of KVA allocated by alloc_vmap_area
510 */
511 static void free_vmap_area(struct vmap_area *va)
512 {
513 spin_lock(&vmap_area_lock);
514 __free_vmap_area(va);
515 spin_unlock(&vmap_area_lock);
516 }
517
518 /*
519 * Clear the pagetable entries of a given vmap_area
520 */
521 static void unmap_vmap_area(struct vmap_area *va)
522 {
523 vunmap_page_range(va->va_start, va->va_end);
524 }
525
526 static void vmap_debug_free_range(unsigned long start, unsigned long end)
527 {
528 /*
529 * Unmap page tables and force a TLB flush immediately if
530 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
531 * bugs similarly to those in linear kernel virtual address
532 * space after a page has been freed.
533 *
534 * All the lazy freeing logic is still retained, in order to
535 * minimise intrusiveness of this debugging feature.
536 *
537 * This is going to be *slow* (linear kernel virtual address
538 * debugging doesn't do a broadcast TLB flush so it is a lot
539 * faster).
540 */
541 #ifdef CONFIG_DEBUG_PAGEALLOC
542 vunmap_page_range(start, end);
543 flush_tlb_kernel_range(start, end);
544 #endif
545 }
546
547 /*
548 * lazy_max_pages is the maximum amount of virtual address space we gather up
549 * before attempting to purge with a TLB flush.
550 *
551 * There is a tradeoff here: a larger number will cover more kernel page tables
552 * and take slightly longer to purge, but it will linearly reduce the number of
553 * global TLB flushes that must be performed. It would seem natural to scale
554 * this number up linearly with the number of CPUs (because vmapping activity
555 * could also scale linearly with the number of CPUs), however it is likely
556 * that in practice, workloads might be constrained in other ways that mean
557 * vmap activity will not scale linearly with CPUs. Also, I want to be
558 * conservative and not introduce a big latency on huge systems, so go with
559 * a less aggressive log scale. It will still be an improvement over the old
560 * code, and it will be simple to change the scale factor if we find that it
561 * becomes a problem on bigger systems.
562 */
563 static unsigned long lazy_max_pages(void)
564 {
565 unsigned int log;
566
567 log = fls(num_online_cpus());
568
569 return log * (32UL * 1024 * 1024 / PAGE_SIZE);
570 }
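
/*
 * For example, with 16 online CPUs and 4K pages: fls(16) == 5, so
 * lazy_max_pages() == 5 * (32MB / 4KB) == 40960 pages, i.e. roughly
 * 160MB of kva may be lazily held before a purge is forced.
 */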
571
572 static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
573
574 /* for per-CPU blocks */
575 static void purge_fragmented_blocks_allcpus(void);
576
577 /*
578 * called before a call to iounmap() if the caller wants vm_area_struct's
579 * immediately freed.
580 */
581 void set_iounmap_nonlazy(void)
582 {
583 atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
584 }
585
586 /*
587 * Purges all lazily-freed vmap areas.
588 *
589 * If sync is 0 then don't purge if there is already a purge in progress.
590 * If force_flush is 1, then flush kernel TLBs between *start and *end even
591 * if we found no lazy vmap areas to unmap (callers can use this to optimise
592 * their own TLB flushing).
593 * Returns with *start = min(*start, lowest purged address)
594 * *end = max(*end, highest purged address)
595 */
596 static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
597 int sync, int force_flush)
598 {
599 static DEFINE_SPINLOCK(purge_lock);
600 LIST_HEAD(valist);
601 struct vmap_area *va;
602 struct vmap_area *n_va;
603 int nr = 0;
604
605 /*
606 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
607 * should not expect such behaviour. This just simplifies locking for
608 * the case that isn't actually used at the moment anyway.
609 */
610 if (!sync && !force_flush) {
611 if (!spin_trylock(&purge_lock))
612 return;
613 } else
614 spin_lock(&purge_lock);
615
616 if (sync)
617 purge_fragmented_blocks_allcpus();
618
619 rcu_read_lock();
620 list_for_each_entry_rcu(va, &vmap_area_list, list) {
621 if (va->flags & VM_LAZY_FREE) {
622 if (va->va_start < *start)
623 *start = va->va_start;
624 if (va->va_end > *end)
625 *end = va->va_end;
626 nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
627 list_add_tail(&va->purge_list, &valist);
628 va->flags |= VM_LAZY_FREEING;
629 va->flags &= ~VM_LAZY_FREE;
630 }
631 }
632 rcu_read_unlock();
633
634 if (nr)
635 atomic_sub(nr, &vmap_lazy_nr);
636
637 if (nr || force_flush)
638 flush_tlb_kernel_range(*start, *end);
639
640 if (nr) {
641 spin_lock(&vmap_area_lock);
642 list_for_each_entry_safe(va, n_va, &valist, purge_list)
643 __free_vmap_area(va);
644 spin_unlock(&vmap_area_lock);
645 }
646 spin_unlock(&purge_lock);
647 }
648
649 /*
650 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
651 * is already purging.
652 */
653 static void try_purge_vmap_area_lazy(void)
654 {
655 unsigned long start = ULONG_MAX, end = 0;
656
657 __purge_vmap_area_lazy(&start, &end, 0, 0);
658 }
659
660 /*
661 * Kick off a purge of the outstanding lazy areas.
662 */
663 static void purge_vmap_area_lazy(void)
664 {
665 unsigned long start = ULONG_MAX, end = 0;
666
667 __purge_vmap_area_lazy(&start, &end, 1, 0);
668 }
669
670 /*
671 * Free a vmap area, caller ensuring that the area has been unmapped
672 * and flush_cache_vunmap had been called for the correct range
673 * previously.
674 */
675 static void free_vmap_area_noflush(struct vmap_area *va)
676 {
677 va->flags |= VM_LAZY_FREE;
678 atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
679 if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
680 try_purge_vmap_area_lazy();
681 }
682
683 /*
684 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
685 * called for the correct range previously.
686 */
687 static void free_unmap_vmap_area_noflush(struct vmap_area *va)
688 {
689 unmap_vmap_area(va);
690 free_vmap_area_noflush(va);
691 }
692
693 /*
694 * Free and unmap a vmap area
695 */
696 static void free_unmap_vmap_area(struct vmap_area *va)
697 {
698 flush_cache_vunmap(va->va_start, va->va_end);
699 free_unmap_vmap_area_noflush(va);
700 }
701
702 static struct vmap_area *find_vmap_area(unsigned long addr)
703 {
704 struct vmap_area *va;
705
706 spin_lock(&vmap_area_lock);
707 va = __find_vmap_area(addr);
708 spin_unlock(&vmap_area_lock);
709
710 return va;
711 }
712
713 static void free_unmap_vmap_area_addr(unsigned long addr)
714 {
715 struct vmap_area *va;
716
717 va = find_vmap_area(addr);
718 BUG_ON(!va);
719 free_unmap_vmap_area(va);
720 }
721
722
723 /*** Per cpu kva allocator ***/
724
725 /*
726 * vmap space is limited especially on 32 bit architectures. Ensure there is
727 * room for at least 16 percpu vmap blocks per CPU.
728 */
729 /*
730 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
731 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
732 * instead (we just need a rough idea)
733 */
734 #if BITS_PER_LONG == 32
735 #define VMALLOC_SPACE (128UL*1024*1024)
736 #else
737 #define VMALLOC_SPACE (128UL*1024*1024*1024)
738 #endif
739
740 #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
741 #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */
742 #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */
743 #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
744 #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
745 #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
746 #define VMAP_BBMAP_BITS \
747 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
748 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
749 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
750
751 #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
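
/*
 * Worked example of the guess above: on 64-bit with 4K pages and
 * NR_CPUS == 64, VMALLOC_PAGES == 128GB / 4KB == 32M, and
 * 32M / 64 / 16 == 32768, which is clamped to VMAP_BBMAP_BITS_MAX,
 * giving VMAP_BBMAP_BITS == 1024 and VMAP_BLOCK_SIZE == 4MB.
 * On 32-bit with NR_CPUS == 8 it works out to 32768 / 8 / 16 == 256
 * bits, i.e. 1MB blocks.
 */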
752
753 static bool vmap_initialized __read_mostly = false;
754
755 struct vmap_block_queue {
756 spinlock_t lock;
757 struct list_head free;
758 };
759
760 struct vmap_block {
761 spinlock_t lock;
762 struct vmap_area *va;
763 unsigned long free, dirty;
764 DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
765 struct list_head free_list;
766 struct rcu_head rcu_head;
767 struct list_head purge;
768 };
769
770 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
771 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
772
773 /*
774 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
775 * in the free path. Could get rid of this if we change the API to return a
776 * "cookie" from alloc, to be passed to free. But no big deal yet.
777 */
778 static DEFINE_SPINLOCK(vmap_block_tree_lock);
779 static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
780
781 /*
782 * We should probably have a fallback mechanism to allocate virtual memory
783 * out of partially filled vmap blocks. However vmap block sizing should be
784 * fairly reasonable according to the vmalloc size, so it shouldn't be a
785 * big problem.
786 */
787
788 static unsigned long addr_to_vb_idx(unsigned long addr)
789 {
790 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
791 addr /= VMAP_BLOCK_SIZE;
792 return addr;
793 }
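
/*
 * E.g. with 4MB vmap blocks, the 4MB-aligned window that contains
 * VMALLOC_START is index 0, the next one is index 1, and so on; since
 * blocks are allocated VMAP_BLOCK_SIZE-aligned, every address inside a
 * block maps to the same index as the block's start address.
 */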
794
795 static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
796 {
797 struct vmap_block_queue *vbq;
798 struct vmap_block *vb;
799 struct vmap_area *va;
800 unsigned long vb_idx;
801 int node, err;
802
803 node = numa_node_id();
804
805 vb = kmalloc_node(sizeof(struct vmap_block),
806 gfp_mask & GFP_RECLAIM_MASK, node);
807 if (unlikely(!vb))
808 return ERR_PTR(-ENOMEM);
809
810 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
811 VMALLOC_START, VMALLOC_END,
812 node, gfp_mask);
813 if (IS_ERR(va)) {
814 kfree(vb);
815 return ERR_CAST(va);
816 }
817
818 err = radix_tree_preload(gfp_mask);
819 if (unlikely(err)) {
820 kfree(vb);
821 free_vmap_area(va);
822 return ERR_PTR(err);
823 }
824
825 spin_lock_init(&vb->lock);
826 vb->va = va;
827 vb->free = VMAP_BBMAP_BITS;
828 vb->dirty = 0;
829 bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
830 INIT_LIST_HEAD(&vb->free_list);
831
832 vb_idx = addr_to_vb_idx(va->va_start);
833 spin_lock(&vmap_block_tree_lock);
834 err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
835 spin_unlock(&vmap_block_tree_lock);
836 BUG_ON(err);
837 radix_tree_preload_end();
838
839 vbq = &get_cpu_var(vmap_block_queue);
840 spin_lock(&vbq->lock);
841 list_add_rcu(&vb->free_list, &vbq->free);
842 spin_unlock(&vbq->lock);
843 put_cpu_var(vmap_block_queue);
844
845 return vb;
846 }
847
848 static void free_vmap_block(struct vmap_block *vb)
849 {
850 struct vmap_block *tmp;
851 unsigned long vb_idx;
852
853 vb_idx = addr_to_vb_idx(vb->va->va_start);
854 spin_lock(&vmap_block_tree_lock);
855 tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
856 spin_unlock(&vmap_block_tree_lock);
857 BUG_ON(tmp != vb);
858
859 free_vmap_area_noflush(vb->va);
860 kfree_rcu(vb, rcu_head);
861 }
862
863 static void purge_fragmented_blocks(int cpu)
864 {
865 LIST_HEAD(purge);
866 struct vmap_block *vb;
867 struct vmap_block *n_vb;
868 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
869
870 rcu_read_lock();
871 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
872
873 if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
874 continue;
875
876 spin_lock(&vb->lock);
877 if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
878 vb->free = 0; /* prevent further allocs after releasing lock */
879 vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
880 bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
881 spin_lock(&vbq->lock);
882 list_del_rcu(&vb->free_list);
883 spin_unlock(&vbq->lock);
884 spin_unlock(&vb->lock);
885 list_add_tail(&vb->purge, &purge);
886 } else
887 spin_unlock(&vb->lock);
888 }
889 rcu_read_unlock();
890
891 list_for_each_entry_safe(vb, n_vb, &purge, purge) {
892 list_del(&vb->purge);
893 free_vmap_block(vb);
894 }
895 }
896
897 static void purge_fragmented_blocks_allcpus(void)
898 {
899 int cpu;
900
901 for_each_possible_cpu(cpu)
902 purge_fragmented_blocks(cpu);
903 }
904
905 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
906 {
907 struct vmap_block_queue *vbq;
908 struct vmap_block *vb;
909 unsigned long addr = 0;
910 unsigned int order;
911
912 BUG_ON(size & ~PAGE_MASK);
913 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
914 if (WARN_ON(size == 0)) {
915 /*
916 * Allocating 0 bytes isn't what caller wants since
917 * get_order(0) returns funny result. Just warn and terminate
918 * early.
919 */
920 return NULL;
921 }
922 order = get_order(size);
923
924 again:
925 rcu_read_lock();
926 vbq = &get_cpu_var(vmap_block_queue);
927 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
928 int i;
929
930 spin_lock(&vb->lock);
931 if (vb->free < 1UL << order)
932 goto next;
933
934 i = VMAP_BBMAP_BITS - vb->free;
935 addr = vb->va->va_start + (i << PAGE_SHIFT);
936 BUG_ON(addr_to_vb_idx(addr) !=
937 addr_to_vb_idx(vb->va->va_start));
938 vb->free -= 1UL << order;
939 if (vb->free == 0) {
940 spin_lock(&vbq->lock);
941 list_del_rcu(&vb->free_list);
942 spin_unlock(&vbq->lock);
943 }
944 spin_unlock(&vb->lock);
945 break;
946 next:
947 spin_unlock(&vb->lock);
948 }
949
950 put_cpu_var(vmap_block_queue);
951 rcu_read_unlock();
952
953 if (!addr) {
954 vb = new_vmap_block(gfp_mask);
955 if (IS_ERR(vb))
956 return vb;
957 goto again;
958 }
959
960 return (void *)addr;
961 }
962
963 static void vb_free(const void *addr, unsigned long size)
964 {
965 unsigned long offset;
966 unsigned long vb_idx;
967 unsigned int order;
968 struct vmap_block *vb;
969
970 BUG_ON(size & ~PAGE_MASK);
971 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
972
973 flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
974
975 order = get_order(size);
976
977 offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
978
979 vb_idx = addr_to_vb_idx((unsigned long)addr);
980 rcu_read_lock();
981 vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
982 rcu_read_unlock();
983 BUG_ON(!vb);
984
985 vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
986
987 spin_lock(&vb->lock);
988 BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
989
990 vb->dirty += 1UL << order;
991 if (vb->dirty == VMAP_BBMAP_BITS) {
992 BUG_ON(vb->free);
993 spin_unlock(&vb->lock);
994 free_vmap_block(vb);
995 } else
996 spin_unlock(&vb->lock);
997 }
998
999 /**
1000 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
1001 *
1002 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
1003 * to amortize TLB flushing overheads. What this means is that any page you
1004 * have now, may, in a former life, have been mapped into kernel virtual
1005 * address by the vmap layer and so there might be some CPUs with TLB entries
1006 * still referencing that page (additional to the regular 1:1 kernel mapping).
1007 *
1008 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
1009 * be sure that none of the pages we have control over will have any aliases
1010 * from the vmap layer.
1011 */
1012 void vm_unmap_aliases(void)
1013 {
1014 unsigned long start = ULONG_MAX, end = 0;
1015 int cpu;
1016 int flush = 0;
1017
1018 if (unlikely(!vmap_initialized))
1019 return;
1020
1021 for_each_possible_cpu(cpu) {
1022 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1023 struct vmap_block *vb;
1024
1025 rcu_read_lock();
1026 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1027 int i, j;
1028
1029 spin_lock(&vb->lock);
1030 i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
1031 if (i < VMAP_BBMAP_BITS) {
1032 unsigned long s, e;
1033
1034 j = find_last_bit(vb->dirty_map,
1035 VMAP_BBMAP_BITS);
1036 j = j + 1; /* need exclusive index */
1037
1038 s = vb->va->va_start + (i << PAGE_SHIFT);
1039 e = vb->va->va_start + (j << PAGE_SHIFT);
1040 flush = 1;
1041
1042 if (s < start)
1043 start = s;
1044 if (e > end)
1045 end = e;
1046 }
1047 spin_unlock(&vb->lock);
1048 }
1049 rcu_read_unlock();
1050 }
1051
1052 __purge_vmap_area_lazy(&start, &end, 1, flush);
1053 }
1054 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
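
/*
 * A sketched use: code that is about to hand pages back for a purpose
 * that cannot tolerate stale kernel aliases (e.g. changing their cache
 * attributes) would do something like
 *
 *	vunmap(vaddr);		(or vm_unmap_ram() for short-lived maps)
 *	vm_unmap_aliases();	(no lazy TLB aliases remain after this)
 *
 * with "vaddr" being whatever mapping the caller had set up.
 */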
1055
1056 /**
1057 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
1058 * @mem: the pointer returned by vm_map_ram
1059 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
1060 */
1061 void vm_unmap_ram(const void *mem, unsigned int count)
1062 {
1063 unsigned long size = count << PAGE_SHIFT;
1064 unsigned long addr = (unsigned long)mem;
1065
1066 BUG_ON(!addr);
1067 BUG_ON(addr < VMALLOC_START);
1068 BUG_ON(addr > VMALLOC_END);
1069 BUG_ON(addr & (PAGE_SIZE-1));
1070
1071 debug_check_no_locks_freed(mem, size);
1072 vmap_debug_free_range(addr, addr+size);
1073
1074 if (likely(count <= VMAP_MAX_ALLOC))
1075 vb_free(mem, size);
1076 else
1077 free_unmap_vmap_area_addr(addr);
1078 }
1079 EXPORT_SYMBOL(vm_unmap_ram);
1080
1081 /**
1082 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
1083 * @pages: an array of pointers to the pages to be mapped
1084 * @count: number of pages
1085 * @node: prefer to allocate data structures on this node
1086 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
1087 *
1088 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
1089 * faster than vmap so it's good. But if you mix long-life and short-life
1090 * objects with vm_map_ram(), it could consume lots of address space through
1091 * fragmentation (especially on a 32bit machine). You could see failures in
1092 * the end. Please use this function for short-lived objects.
1093 *
1094 * Returns: a pointer to the address that has been mapped, or %NULL on failure
1095 */
1096 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
1097 {
1098 unsigned long size = count << PAGE_SHIFT;
1099 unsigned long addr;
1100 void *mem;
1101
1102 if (likely(count <= VMAP_MAX_ALLOC)) {
1103 mem = vb_alloc(size, GFP_KERNEL);
1104 if (IS_ERR(mem))
1105 return NULL;
1106 addr = (unsigned long)mem;
1107 } else {
1108 struct vmap_area *va;
1109 va = alloc_vmap_area(size, PAGE_SIZE,
1110 VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
1111 if (IS_ERR(va))
1112 return NULL;
1113
1114 addr = va->va_start;
1115 mem = (void *)addr;
1116 }
1117 if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
1118 vm_unmap_ram(mem, count);
1119 return NULL;
1120 }
1121 return mem;
1122 }
1123 EXPORT_SYMBOL(vm_map_ram);
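
/*
 * Sketch of the intended calling pattern, with "pages" and "nr" being
 * the caller's page array and its length:
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	... short-lived accesses through va ...
 *	vm_unmap_ram(va, nr);
 */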
1124
1125 static struct vm_struct *vmlist __initdata;
1126 /**
1127 * vm_area_add_early - add vmap area early during boot
1128 * @vm: vm_struct to add
1129 *
1130 * This function is used to add fixed kernel vm area to vmlist before
1131 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
1132 * should contain proper values and the other fields should be zero.
1133 *
1134 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1135 */
1136 void __init vm_area_add_early(struct vm_struct *vm)
1137 {
1138 struct vm_struct *tmp, **p;
1139
1140 BUG_ON(vmap_initialized);
1141 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1142 if (tmp->addr >= vm->addr) {
1143 BUG_ON(tmp->addr < vm->addr + vm->size);
1144 break;
1145 } else
1146 BUG_ON(tmp->addr + tmp->size > vm->addr);
1147 }
1148 vm->next = *p;
1149 *p = vm;
1150 }
1151
1152 /**
1153 * vm_area_register_early - register vmap area early during boot
1154 * @vm: vm_struct to register
1155 * @align: requested alignment
1156 *
1157 * This function is used to register kernel vm area before
1158 * vmalloc_init() is called. @vm->size and @vm->flags should contain
1159 * proper values on entry and other fields should be zero. On return,
1160 * vm->addr contains the allocated address.
1161 *
1162 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1163 */
1164 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1165 {
1166 static size_t vm_init_off __initdata;
1167 unsigned long addr;
1168
1169 addr = ALIGN(VMALLOC_START + vm_init_off, align);
1170 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1171
1172 vm->addr = (void *)addr;
1173
1174 vm_area_add_early(vm);
1175 }
1176
1177 void __init vmalloc_init(void)
1178 {
1179 struct vmap_area *va;
1180 struct vm_struct *tmp;
1181 int i;
1182
1183 for_each_possible_cpu(i) {
1184 struct vmap_block_queue *vbq;
1185 struct vfree_deferred *p;
1186
1187 vbq = &per_cpu(vmap_block_queue, i);
1188 spin_lock_init(&vbq->lock);
1189 INIT_LIST_HEAD(&vbq->free);
1190 p = &per_cpu(vfree_deferred, i);
1191 init_llist_head(&p->list);
1192 INIT_WORK(&p->wq, free_work);
1193 }
1194
1195 /* Import existing vmlist entries. */
1196 for (tmp = vmlist; tmp; tmp = tmp->next) {
1197 va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
1198 va->flags = VM_VM_AREA;
1199 va->va_start = (unsigned long)tmp->addr;
1200 va->va_end = va->va_start + tmp->size;
1201 va->vm = tmp;
1202 __insert_vmap_area(va);
1203 }
1204
1205 vmap_area_pcpu_hole = VMALLOC_END;
1206
1207 vmap_initialized = true;
1208 }
1209
1210 /**
1211 * map_kernel_range_noflush - map kernel VM area with the specified pages
1212 * @addr: start of the VM area to map
1213 * @size: size of the VM area to map
1214 * @prot: page protection flags to use
1215 * @pages: pages to map
1216 *
1217 * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size
1218 * specify should have been allocated using get_vm_area() and its
1219 * friends.
1220 *
1221 * NOTE:
1222 * This function does NOT do any cache flushing. The caller is
1223 * responsible for calling flush_cache_vmap() on to-be-mapped areas
1224 * before calling this function.
1225 *
1226 * RETURNS:
1227 * The number of pages mapped on success, -errno on failure.
1228 */
1229 int map_kernel_range_noflush(unsigned long addr, unsigned long size,
1230 pgprot_t prot, struct page **pages)
1231 {
1232 return vmap_page_range_noflush(addr, addr + size, prot, pages);
1233 }
1234
1235 /**
1236 * unmap_kernel_range_noflush - unmap kernel VM area
1237 * @addr: start of the VM area to unmap
1238 * @size: size of the VM area to unmap
1239 *
1240 * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size
1241 * specify should have been allocated using get_vm_area() and its
1242 * friends.
1243 *
1244 * NOTE:
1245 * This function does NOT do any cache flushing. The caller is
1246 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
1247 * before calling this function and flush_tlb_kernel_range() after.
1248 */
1249 void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
1250 {
1251 vunmap_page_range(addr, addr + size);
1252 }
1253 EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
1254
1255 /**
1256 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
1257 * @addr: start of the VM area to unmap
1258 * @size: size of the VM area to unmap
1259 *
1260 * Similar to unmap_kernel_range_noflush() but flushes vcache before
1261 * the unmapping and tlb after.
1262 */
1263 void unmap_kernel_range(unsigned long addr, unsigned long size)
1264 {
1265 unsigned long end = addr + size;
1266
1267 flush_cache_vunmap(addr, end);
1268 vunmap_page_range(addr, end);
1269 flush_tlb_kernel_range(addr, end);
1270 }
1271 EXPORT_SYMBOL_GPL(unmap_kernel_range);
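
/*
 * Sketch of how these pair up for a caller managing its own VM area,
 * where "area", "pages" and "size" are the caller's:
 *
 *	ret = map_kernel_range_noflush((unsigned long)area->addr, size,
 *				       PAGE_KERNEL, pages);
 *	flush_cache_vmap((unsigned long)area->addr,
 *			 (unsigned long)area->addr + size);
 *	...
 *	unmap_kernel_range((unsigned long)area->addr, size);
 */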
1272
1273 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
1274 {
1275 unsigned long addr = (unsigned long)area->addr;
1276 unsigned long end = addr + get_vm_area_size(area);
1277 int err;
1278
1279 err = vmap_page_range(addr, end, prot, pages);
1280
1281 return err > 0 ? 0 : err;
1282 }
1283 EXPORT_SYMBOL_GPL(map_vm_area);
1284
1285 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
1286 unsigned long flags, const void *caller)
1287 {
1288 spin_lock(&vmap_area_lock);
1289 vm->flags = flags;
1290 vm->addr = (void *)va->va_start;
1291 vm->size = va->va_end - va->va_start;
1292 vm->caller = caller;
1293 va->vm = vm;
1294 va->flags |= VM_VM_AREA;
1295 spin_unlock(&vmap_area_lock);
1296 }
1297
1298 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
1299 {
1300 /*
1301 * Before removing VM_UNINITIALIZED,
1302 * we should make sure that vm has proper values.
1303 * Pair with smp_rmb() in show_numa_info().
1304 */
1305 smp_wmb();
1306 vm->flags &= ~VM_UNINITIALIZED;
1307 }
1308
1309 static struct vm_struct *__get_vm_area_node(unsigned long size,
1310 unsigned long align, unsigned long flags, unsigned long start,
1311 unsigned long end, int node, gfp_t gfp_mask, const void *caller)
1312 {
1313 struct vmap_area *va;
1314 struct vm_struct *area;
1315
1316 BUG_ON(in_interrupt());
1317 if (flags & VM_IOREMAP)
1318 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
1319
1320 size = PAGE_ALIGN(size);
1321 if (unlikely(!size))
1322 return NULL;
1323
1324 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
1325 if (unlikely(!area))
1326 return NULL;
1327
1328 /*
1329 * We always allocate a guard page.
1330 */
1331 size += PAGE_SIZE;
1332
1333 va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
1334 if (IS_ERR(va)) {
1335 kfree(area);
1336 return NULL;
1337 }
1338
1339 setup_vmalloc_vm(area, va, flags, caller);
1340
1341 return area;
1342 }
1343
1344 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
1345 unsigned long start, unsigned long end)
1346 {
1347 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
1348 GFP_KERNEL, __builtin_return_address(0));
1349 }
1350 EXPORT_SYMBOL_GPL(__get_vm_area);
1351
1352 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
1353 unsigned long start, unsigned long end,
1354 const void *caller)
1355 {
1356 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
1357 GFP_KERNEL, caller);
1358 }
1359
1360 /**
1361 * get_vm_area - reserve a contiguous kernel virtual area
1362 * @size: size of the area
1363 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
1364 *
1365 * Search an area of @size in the kernel virtual mapping area,
1366 * and reserve it for our purposes. Returns the area descriptor
1367 * on success or %NULL on failure.
1368 */
1369 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
1370 {
1371 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1372 NUMA_NO_NODE, GFP_KERNEL,
1373 __builtin_return_address(0));
1374 }
1375
1376 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
1377 const void *caller)
1378 {
1379 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1380 NUMA_NO_NODE, GFP_KERNEL, caller);
1381 }
1382
1383 /**
1384 * find_vm_area - find a continuous kernel virtual area
1385 * @addr: base address
1386 *
1387 * Search for the kernel VM area starting at @addr, and return it.
1388 * It is up to the caller to do all required locking to keep the returned
1389 * pointer valid.
1390 */
1391 struct vm_struct *find_vm_area(const void *addr)
1392 {
1393 struct vmap_area *va;
1394
1395 va = find_vmap_area((unsigned long)addr);
1396 if (va && va->flags & VM_VM_AREA)
1397 return va->vm;
1398
1399 return NULL;
1400 }
1401
1402 /**
1403 * remove_vm_area - find and remove a continuous kernel virtual area
1404 * @addr: base address
1405 *
1406 * Search for the kernel VM area starting at @addr, and remove it.
1407 * This function returns the found VM area, but using it is NOT safe
1408 * on SMP machines, except for its size or flags.
1409 */
1410 struct vm_struct *remove_vm_area(const void *addr)
1411 {
1412 struct vmap_area *va;
1413
1414 va = find_vmap_area((unsigned long)addr);
1415 if (va && va->flags & VM_VM_AREA) {
1416 struct vm_struct *vm = va->vm;
1417
1418 spin_lock(&vmap_area_lock);
1419 va->vm = NULL;
1420 va->flags &= ~VM_VM_AREA;
1421 spin_unlock(&vmap_area_lock);
1422
1423 vmap_debug_free_range(va->va_start, va->va_end);
1424 free_unmap_vmap_area(va);
1425 vm->size -= PAGE_SIZE;
1426
1427 return vm;
1428 }
1429 return NULL;
1430 }
1431
1432 static void __vunmap(const void *addr, int deallocate_pages)
1433 {
1434 struct vm_struct *area;
1435
1436 if (!addr)
1437 return;
1438
1439 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
1440 addr))
1441 return;
1442
1443 area = remove_vm_area(addr);
1444 if (unlikely(!area)) {
1445 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
1446 addr);
1447 return;
1448 }
1449
1450 debug_check_no_locks_freed(addr, area->size);
1451 debug_check_no_obj_freed(addr, area->size);
1452
1453 if (deallocate_pages) {
1454 int i;
1455
1456 for (i = 0; i < area->nr_pages; i++) {
1457 struct page *page = area->pages[i];
1458
1459 BUG_ON(!page);
1460 __free_page(page);
1461 }
1462
1463 if (area->flags & VM_VPAGES)
1464 vfree(area->pages);
1465 else
1466 kfree(area->pages);
1467 }
1468
1469 kfree(area);
1470 return;
1471 }
1472
1473 /**
1474 * vfree - release memory allocated by vmalloc()
1475 * @addr: memory base address
1476 *
1477 * Free the virtually continuous memory area starting at @addr, as
1478 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
1479 * NULL, no operation is performed.
1480 *
1481 * Must not be called in NMI context (strictly speaking, only if we don't
1482 * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
1483 * conventions for vfree() arch-dependent would be a really bad idea)
1484 *
1485 * NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
1486 */
1487 void vfree(const void *addr)
1488 {
1489 BUG_ON(in_nmi());
1490
1491 kmemleak_free(addr);
1492
1493 if (!addr)
1494 return;
1495 if (unlikely(in_interrupt())) {
1496 struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
1497 if (llist_add((struct llist_node *)addr, &p->list))
1498 schedule_work(&p->wq);
1499 } else
1500 __vunmap(addr, 1);
1501 }
1502 EXPORT_SYMBOL(vfree);
1503
1504 /**
1505 * vunmap - release virtual mapping obtained by vmap()
1506 * @addr: memory base address
1507 *
1508 * Free the virtually contiguous memory area starting at @addr,
1509 * which was created from the page array passed to vmap().
1510 *
1511 * Must not be called in interrupt context.
1512 */
1513 void vunmap(const void *addr)
1514 {
1515 BUG_ON(in_interrupt());
1516 might_sleep();
1517 if (addr)
1518 __vunmap(addr, 0);
1519 }
1520 EXPORT_SYMBOL(vunmap);
1521
1522 /**
1523 * vmap - map an array of pages into virtually contiguous space
1524 * @pages: array of page pointers
1525 * @count: number of pages to map
1526 * @flags: vm_area->flags
1527 * @prot: page protection for the mapping
1528 *
1529 * Maps @count pages from @pages into contiguous kernel virtual
1530 * space.
1531 */
1532 void *vmap(struct page **pages, unsigned int count,
1533 unsigned long flags, pgprot_t prot)
1534 {
1535 struct vm_struct *area;
1536
1537 might_sleep();
1538
1539 if (count > totalram_pages)
1540 return NULL;
1541
1542 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
1543 __builtin_return_address(0));
1544 if (!area)
1545 return NULL;
1546
1547 if (map_vm_area(area, prot, pages)) {
1548 vunmap(area->addr);
1549 return NULL;
1550 }
1551
1552 return area->addr;
1553 }
1554 EXPORT_SYMBOL(vmap);
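
/*
 * Sketch, where "pages" holds "nr" page pointers the caller obtained
 * elsewhere:
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	... long-lived accesses through va ...
 *	vunmap(va);
 */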
1555
1556 static void *__vmalloc_node(unsigned long size, unsigned long align,
1557 gfp_t gfp_mask, pgprot_t prot,
1558 int node, const void *caller);
1559 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1560 pgprot_t prot, int node)
1561 {
1562 const int order = 0;
1563 struct page **pages;
1564 unsigned int nr_pages, array_size, i;
1565 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
1566 const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
1567
1568 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
1569 array_size = (nr_pages * sizeof(struct page *));
1570
1571 area->nr_pages = nr_pages;
1572 /* Please note that the recursion is strictly bounded. */
1573 if (array_size > PAGE_SIZE) {
1574 pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
1575 PAGE_KERNEL, node, area->caller);
1576 area->flags |= VM_VPAGES;
1577 } else {
1578 pages = kmalloc_node(array_size, nested_gfp, node);
1579 }
1580 area->pages = pages;
1581 if (!area->pages) {
1582 remove_vm_area(area->addr);
1583 kfree(area);
1584 return NULL;
1585 }
1586
1587 for (i = 0; i < area->nr_pages; i++) {
1588 struct page *page;
1589
1590 if (node == NUMA_NO_NODE)
1591 page = alloc_page(alloc_mask);
1592 else
1593 page = alloc_pages_node(node, alloc_mask, order);
1594
1595 if (unlikely(!page)) {
1596 /* Successfully allocated i pages, free them in __vunmap() */
1597 area->nr_pages = i;
1598 goto fail;
1599 }
1600 area->pages[i] = page;
1601 if (gfp_mask & __GFP_WAIT)
1602 cond_resched();
1603 }
1604
1605 if (map_vm_area(area, prot, pages))
1606 goto fail;
1607 return area->addr;
1608
1609 fail:
1610 warn_alloc_failed(gfp_mask, order,
1611 "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
1612 (area->nr_pages*PAGE_SIZE), area->size);
1613 vfree(area->addr);
1614 return NULL;
1615 }
1616
1617 /**
1618 * __vmalloc_node_range - allocate virtually contiguous memory
1619 * @size: allocation size
1620 * @align: desired alignment
1621 * @start: vm area range start
1622 * @end: vm area range end
1623 * @gfp_mask: flags for the page level allocator
1624 * @prot: protection mask for the allocated pages
1625 * @node: node to use for allocation or NUMA_NO_NODE
1626 * @caller: caller's return address
1627 *
1628 * Allocate enough pages to cover @size from the page level
1629 * allocator with @gfp_mask flags. Map them into contiguous
1630 * kernel virtual space, using a pagetable protection of @prot.
1631 */
1632 void *__vmalloc_node_range(unsigned long size, unsigned long align,
1633 unsigned long start, unsigned long end, gfp_t gfp_mask,
1634 pgprot_t prot, int node, const void *caller)
1635 {
1636 struct vm_struct *area;
1637 void *addr;
1638 unsigned long real_size = size;
1639
1640 size = PAGE_ALIGN(size);
1641 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
1642 goto fail;
1643
1644 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
1645 start, end, node, gfp_mask, caller);
1646 if (!area)
1647 goto fail;
1648
1649 addr = __vmalloc_area_node(area, gfp_mask, prot, node);
1650 if (!addr)
1651 return NULL;
1652
1653 /*
1654 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
1655 * flag. It means that vm_struct is not fully initialized.
1656 * Now, it is fully initialized, so remove this flag here.
1657 */
1658 clear_vm_uninitialized_flag(area);
1659
1660 /*
1661 * A ref_count = 2 is needed because vm_struct allocated in
1662 * __get_vm_area_node() contains a reference to the virtual address of
1663 * the vmalloc'ed block.
1664 */
1665 kmemleak_alloc(addr, real_size, 2, gfp_mask);
1666
1667 return addr;
1668
1669 fail:
1670 warn_alloc_failed(gfp_mask, 0,
1671 "vmalloc: allocation failure: %lu bytes\n",
1672 real_size);
1673 return NULL;
1674 }
1675
1676 /**
1677 * __vmalloc_node - allocate virtually contiguous memory
1678 * @size: allocation size
1679 * @align: desired alignment
1680 * @gfp_mask: flags for the page level allocator
1681 * @prot: protection mask for the allocated pages
1682 * @node: node to use for allocation or NUMA_NO_NODE
1683 * @caller: caller's return address
1684 *
1685 * Allocate enough pages to cover @size from the page level
1686 * allocator with @gfp_mask flags. Map them into contiguous
1687 * kernel virtual space, using a pagetable protection of @prot.
1688 */
1689 static void *__vmalloc_node(unsigned long size, unsigned long align,
1690 gfp_t gfp_mask, pgprot_t prot,
1691 int node, const void *caller)
1692 {
1693 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
1694 gfp_mask, prot, node, caller);
1695 }
1696
1697 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
1698 {
1699 return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
1700 __builtin_return_address(0));
1701 }
1702 EXPORT_SYMBOL(__vmalloc);
1703
1704 static inline void *__vmalloc_node_flags(unsigned long size,
1705 int node, gfp_t flags)
1706 {
1707 return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
1708 node, __builtin_return_address(0));
1709 }
1710
1711 /**
1712 * vmalloc - allocate virtually contiguous memory
1713 * @size: allocation size
1714 * Allocate enough pages to cover @size from the page level
1715 * allocator and map them into contiguous kernel virtual space.
1716 *
1717 * For tight control over page level allocator and protection flags
1718 * use __vmalloc() instead.
1719 */
1720 void *vmalloc(unsigned long size)
1721 {
1722 return __vmalloc_node_flags(size, NUMA_NO_NODE,
1723 GFP_KERNEL | __GFP_HIGHMEM);
1724 }
1725 EXPORT_SYMBOL(vmalloc);
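
/*
 * Typical use for a large, virtually contiguous buffer ("size" is the
 * caller's request):
 *
 *	buf = vmalloc(size);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */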
1726
1727 /**
1728 * vzalloc - allocate virtually contiguous memory with zero fill
1729 * @size: allocation size
1730 * Allocate enough pages to cover @size from the page level
1731 * allocator and map them into contiguous kernel virtual space.
1732 * The memory allocated is set to zero.
1733 *
1734 * For tight control over page level allocator and protection flags
1735 * use __vmalloc() instead.
1736 */
1737 void *vzalloc(unsigned long size)
1738 {
1739 return __vmalloc_node_flags(size, NUMA_NO_NODE,
1740 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
1741 }
1742 EXPORT_SYMBOL(vzalloc);
1743
1744 /**
1745 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
1746 * @size: allocation size
1747 *
1748 * The resulting memory area is zeroed so it can be mapped to userspace
1749 * without leaking data.
1750 */
1751 void *vmalloc_user(unsigned long size)
1752 {
1753 struct vm_struct *area;
1754 void *ret;
1755
1756 ret = __vmalloc_node(size, SHMLBA,
1757 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
1758 PAGE_KERNEL, NUMA_NO_NODE,
1759 __builtin_return_address(0));
1760 if (ret) {
1761 area = find_vm_area(ret);
1762 area->flags |= VM_USERMAP;
1763 }
1764 return ret;
1765 }
1766 EXPORT_SYMBOL(vmalloc_user);
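
/*
 * The usual pairing is with remap_vmalloc_range() from a driver's mmap
 * handler; "foo_mmap" and "kbuf" below are hypothetical caller names:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, kbuf, vma->vm_pgoff);
 *	}
 */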
1767
1768 /**
1769 * vmalloc_node - allocate memory on a specific node
1770 * @size: allocation size
1771 * @node: numa node
1772 *
1773 * Allocate enough pages to cover @size from the page level
1774 * allocator and map them into contiguous kernel virtual space.
1775 *
1776 * For tight control over page level allocator and protection flags
1777 * use __vmalloc() instead.
1778 */
1779 void *vmalloc_node(unsigned long size, int node)
1780 {
1781 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
1782 node, __builtin_return_address(0));
1783 }
1784 EXPORT_SYMBOL(vmalloc_node);
1785
1786 /**
1787 * vzalloc_node - allocate memory on a specific node with zero fill
1788 * @size: allocation size
1789 * @node: numa node
1790 *
1791 * Allocate enough pages to cover @size from the page level
1792 * allocator and map them into contiguous kernel virtual space.
1793 * The memory allocated is set to zero.
1794 *
1795 * For tight control over page level allocator and protection flags
1796 * use __vmalloc_node() instead.
1797 */
1798 void *vzalloc_node(unsigned long size, int node)
1799 {
1800 return __vmalloc_node_flags(size, node,
1801 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
1802 }
1803 EXPORT_SYMBOL(vzalloc_node);
1804
1805 #ifndef PAGE_KERNEL_EXEC
1806 # define PAGE_KERNEL_EXEC PAGE_KERNEL
1807 #endif
1808
1809 /**
1810 * vmalloc_exec - allocate virtually contiguous, executable memory
1811 * @size: allocation size
1812 *
1813 * Kernel-internal function to allocate enough pages to cover @size
1814 * from the page level allocator and map them into contiguous and
1815 * executable kernel virtual space.
1816 *
1817 * For tight control over page level allocator and protection flags
1818 * use __vmalloc() instead.
1819 */
1820
1821 void *vmalloc_exec(unsigned long size)
1822 {
1823 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
1824 NUMA_NO_NODE, __builtin_return_address(0));
1825 }
1826
1827 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
1828 #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
1829 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
1830 #define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
1831 #else
1832 #define GFP_VMALLOC32 GFP_KERNEL
1833 #endif
1834
1835 /**
1836 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
1837 * @size: allocation size
1838 *
1839 * Allocate enough 32bit PA addressable pages to cover @size from the
1840 * page level allocator and map them into contiguous kernel virtual space.
1841 */
1842 void *vmalloc_32(unsigned long size)
1843 {
1844 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
1845 NUMA_NO_NODE, __builtin_return_address(0));
1846 }
1847 EXPORT_SYMBOL(vmalloc_32);
1848
1849 /**
1850 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
1851 * @size: allocation size
1852 *
1853 * The resulting memory area is 32bit addressable and zeroed so it can be
1854 * mapped to userspace without leaking data.
1855 */
1856 void *vmalloc_32_user(unsigned long size)
1857 {
1858 struct vm_struct *area;
1859 void *ret;
1860
1861 ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
1862 NUMA_NO_NODE, __builtin_return_address(0));
1863 if (ret) {
1864 area = find_vm_area(ret);
1865 area->flags |= VM_USERMAP;
1866 }
1867 return ret;
1868 }
1869 EXPORT_SYMBOL(vmalloc_32_user);
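
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * driver whose hardware can only address 32bit physical memory, and
 * which also exposes the buffer to userspace, might use
 * vmalloc_32_user() ("buf" and "len" are made-up names):
 *
 *	void *buf = vmalloc_32_user(len);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *
 * Each page of the buffer is then 32bit addressable (per GFP_VMALLOC32
 * above), and the area is zeroed and VM_USERMAP-tagged just like a
 * vmalloc_user() allocation.
 */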
1870
1871 /*
1872 * Small helper routine: copy contents from addr into buf.
1873 * If a page is not present, fill the corresponding bytes with zero.
1874 */
1875
1876 static int aligned_vread(char *buf, char *addr, unsigned long count)
1877 {
1878 struct page *p;
1879 int copied = 0;
1880
1881 while (count) {
1882 unsigned long offset, length;
1883
1884 offset = (unsigned long)addr & ~PAGE_MASK;
1885 length = PAGE_SIZE - offset;
1886 if (length > count)
1887 length = count;
1888 p = vmalloc_to_page(addr);
1889 /*
1890 * To do safe access to this _mapped_ area, we need a
1891 * lock. But taking a lock here would add vmalloc()/vfree()
1892 * overhead to this rarely used _debug_ interface. Instead,
1893 * we use kmap_atomic() and accept a small overhead in this
1894 * access function.
1895 */
1896 if (p) {
1897 /*
1898 * we can expect USER0 is not used (see vread/vwrite's
1899 * function description)
1900 */
1901 void *map = kmap_atomic(p);
1902 memcpy(buf, map + offset, length);
1903 kunmap_atomic(map);
1904 } else
1905 memset(buf, 0, length);
1906
1907 addr += length;
1908 buf += length;
1909 copied += length;
1910 count -= length;
1911 }
1912 return copied;
1913 }
1914
1915 static int aligned_vwrite(char *buf, char *addr, unsigned long count)
1916 {
1917 struct page *p;
1918 int copied = 0;
1919
1920 while (count) {
1921 unsigned long offset, length;
1922
1923 offset = (unsigned long)addr & ~PAGE_MASK;
1924 length = PAGE_SIZE - offset;
1925 if (length > count)
1926 length = count;
1927 p = vmalloc_to_page(addr);
1928 /*
1929 * To do safe access to this _mapped_ area, we need a
1930 * lock. But taking a lock here would add vmalloc()/vfree()
1931 * overhead to this rarely used _debug_ interface. Instead,
1932 * we use kmap_atomic() and accept a small overhead in this
1933 * access function.
1934 */
1935 if (p) {
1936 /*
1937 * we can expect USER0 is not used (see vread/vwrite's
1938 * function description)
1939 */
1940 void *map = kmap_atomic(p);
1941 memcpy(map + offset, buf, length);
1942 kunmap_atomic(map);
1943 }
1944 addr += length;
1945 buf += length;
1946 copied += length;
1947 count -= length;
1948 }
1949 return copied;
1950 }
1951
1952 /**
1953 * vread() - read vmalloc area in a safe way.
1954 * @buf: buffer for reading data
1955 * @addr: vm address.
1956 * @count: number of bytes to be read.
1957 *
1958 * Returns the number of bytes by which addr and buf should be increased
1959 * (the same number as @count). Returns 0 if [addr...addr+count) doesn't
1960 * include any intersection with a live vmalloc area.
1961 *
1962 * This function checks that addr is a valid vmalloc'ed area, and
1963 * copies data from that area to a given buffer. If the given memory range
1964 * of [addr...addr+count) includes some valid address, data is copied to
1965 * the proper area of @buf. If there are memory holes, they'll be zero-filled.
1966 * IOREMAP areas are treated as memory holes and no copy is done.
1967 *
1968 * If [addr...addr+count) doesn't include any intersection with a live
1969 * vm_struct area, returns 0. @buf should be a kernel buffer.
1970 *
1971 * Note: In usual ops, vread() is never necessary because the caller
1972 * should know the vmalloc() area is valid and can use memcpy().
1973 * This is for routines which have to access the vmalloc area without
1974 * any information, such as /dev/kmem.
1975 *
1976 */
1977
1978 long vread(char *buf, char *addr, unsigned long count)
1979 {
1980 struct vmap_area *va;
1981 struct vm_struct *vm;
1982 char *vaddr, *buf_start = buf;
1983 unsigned long buflen = count;
1984 unsigned long n;
1985
1986 /* Don't allow overflow */
1987 if ((unsigned long) addr + count < count)
1988 count = -(unsigned long) addr;
1989
1990 spin_lock(&vmap_area_lock);
1991 list_for_each_entry(va, &vmap_area_list, list) {
1992 if (!count)
1993 break;
1994
1995 if (!(va->flags & VM_VM_AREA))
1996 continue;
1997
1998 vm = va->vm;
1999 vaddr = (char *) vm->addr;
2000 if (addr >= vaddr + get_vm_area_size(vm))
2001 continue;
2002 while (addr < vaddr) {
2003 if (count == 0)
2004 goto finished;
2005 *buf = '\0';
2006 buf++;
2007 addr++;
2008 count--;
2009 }
2010 n = vaddr + get_vm_area_size(vm) - addr;
2011 if (n > count)
2012 n = count;
2013 if (!(vm->flags & VM_IOREMAP))
2014 aligned_vread(buf, addr, n);
2015 else /* IOREMAP area is treated as memory hole */
2016 memset(buf, 0, n);
2017 buf += n;
2018 addr += n;
2019 count -= n;
2020 }
2021 finished:
2022 spin_unlock(&vmap_area_lock);
2023
2024 if (buf == buf_start)
2025 return 0;
2026 /* zero-fill memory holes */
2027 if (buf != buf_start + buflen)
2028 memset(buf, 0, buflen - (buf - buf_start));
2029
2030 return buflen;
2031 }
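
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * /dev/kmem style reader copying a vmalloc range to userspace through a
 * bounce buffer ("ubuf", "vaddr" and "count" are made-up names):
 *
 *	char *kbuf = kmalloc(count, GFP_KERNEL);
 *	long got;
 *
 *	if (!kbuf)
 *		return -ENOMEM;
 *	got = vread(kbuf, (char *)vaddr, count);
 *	if (got && copy_to_user(ubuf, kbuf, got))
 *		got = -EFAULT;
 *	kfree(kbuf);
 *	return got;
 *
 * vwrite() below is the mirror image for the write path.
 */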
2032
2033 /**
2034 * vwrite() - write vmalloc area in a safe way.
2035 * @buf: buffer for source data
2036 * @addr: vm address.
2037 * @count: number of bytes to be written.
2038 *
2039 * Returns the number of bytes by which addr and buf should be increased
2040 * (the same number as @count).
2041 * If [addr...addr+count) doesn't include any intersection with a valid
2042 * vmalloc area, returns 0.
2043 *
2044 * This function checks that addr is a valid vmalloc'ed area, and
2045 * copies data from a buffer to the given addr. If the specified range of
2046 * [addr...addr+count) includes some valid address, data is copied from
2047 * the proper area of @buf. If there are memory holes, nothing is copied
2048 * to them. IOREMAP areas are treated as memory holes and no copy is done.
2049 *
2050 * If [addr...addr+count) doesn't include any intersection with a live
2051 * vm_struct area, returns 0. @buf should be a kernel buffer.
2052 *
2053 * Note: In usual ops, vwrite() is never necessary because the caller
2054 * should know the vmalloc() area is valid and can use memcpy().
2055 * This is for routines which have to access the vmalloc area without
2056 * any information, such as /dev/kmem.
2057 */
2058
2059 long vwrite(char *buf, char *addr, unsigned long count)
2060 {
2061 struct vmap_area *va;
2062 struct vm_struct *vm;
2063 char *vaddr;
2064 unsigned long n, buflen;
2065 int copied = 0;
2066
2067 /* Don't allow overflow */
2068 if ((unsigned long) addr + count < count)
2069 count = -(unsigned long) addr;
2070 buflen = count;
2071
2072 spin_lock(&vmap_area_lock);
2073 list_for_each_entry(va, &vmap_area_list, list) {
2074 if (!count)
2075 break;
2076
2077 if (!(va->flags & VM_VM_AREA))
2078 continue;
2079
2080 vm = va->vm;
2081 vaddr = (char *) vm->addr;
2082 if (addr >= vaddr + get_vm_area_size(vm))
2083 continue;
2084 while (addr < vaddr) {
2085 if (count == 0)
2086 goto finished;
2087 buf++;
2088 addr++;
2089 count--;
2090 }
2091 n = vaddr + get_vm_area_size(vm) - addr;
2092 if (n > count)
2093 n = count;
2094 if (!(vm->flags & VM_IOREMAP)) {
2095 aligned_vwrite(buf, addr, n);
2096 copied++;
2097 }
2098 buf += n;
2099 addr += n;
2100 count -= n;
2101 }
2102 finished:
2103 spin_unlock(&vmap_area_lock);
2104 if (!copied)
2105 return 0;
2106 return buflen;
2107 }
2108
2109 /**
2110 * remap_vmalloc_range_partial - map vmalloc pages to userspace
2111 * @vma: vma to cover
2112 * @uaddr: target user address to start at
2113 * @kaddr: virtual address of vmalloc kernel memory
2114 * @size: size of map area
2115 *
2116 * Returns: 0 for success, -Exxx on failure
2117 *
2118 * This function checks that @kaddr is a valid vmalloc'ed area,
2119 * and that it is big enough to cover the range starting at
2120 * @uaddr in @vma. It returns failure if these criteria aren't
2121 * met.
2122 *
2123 * Similar to remap_pfn_range() (see mm/memory.c)
2124 */
2125 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
2126 void *kaddr, unsigned long size)
2127 {
2128 struct vm_struct *area;
2129
2130 size = PAGE_ALIGN(size);
2131
2132 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
2133 return -EINVAL;
2134
2135 area = find_vm_area(kaddr);
2136 if (!area)
2137 return -EINVAL;
2138
2139 if (!(area->flags & VM_USERMAP))
2140 return -EINVAL;
2141
2142 if (kaddr + size > area->addr + area->size)
2143 return -EINVAL;
2144
2145 do {
2146 struct page *page = vmalloc_to_page(kaddr);
2147 int ret;
2148
2149 ret = vm_insert_page(vma, uaddr, page);
2150 if (ret)
2151 return ret;
2152
2153 uaddr += PAGE_SIZE;
2154 kaddr += PAGE_SIZE;
2155 size -= PAGE_SIZE;
2156 } while (size > 0);
2157
2158 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
2159
2160 return 0;
2161 }
2162 EXPORT_SYMBOL(remap_vmalloc_range_partial);
2163
2164 /**
2165 * remap_vmalloc_range - map vmalloc pages to userspace
2166 * @vma: vma to cover (map full range of vma)
2167 * @addr: vmalloc memory
2168 * @pgoff: number of pages into addr before first page to map
2169 *
2170 * Returns: 0 for success, -Exxx on failure
2171 *
2172 * This function checks that addr is a valid vmalloc'ed area, and
2173 * that it is big enough to cover the vma. It returns failure if
2174 * these criteria aren't met.
2175 *
2176 * Similar to remap_pfn_range() (see mm/memory.c)
2177 */
2178 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
2179 unsigned long pgoff)
2180 {
2181 return remap_vmalloc_range_partial(vma, vma->vm_start,
2182 addr + (pgoff << PAGE_SHIFT),
2183 vma->vm_end - vma->vm_start);
2184 }
2185 EXPORT_SYMBOL(remap_vmalloc_range);
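
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * driver ->mmap handler exposing a vmalloc_user() buffer to userspace
 * ("mydev_mmap" and "mydev_buf" are made-up names; mydev_buf would have
 * been allocated earlier with vmalloc_user()):
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, mydev_buf, vma->vm_pgoff);
 *	}
 *
 * remap_vmalloc_range() always covers the whole vma starting at
 * vma->vm_start; remap_vmalloc_range_partial() above lets a caller map
 * at an arbitrary page-aligned user address instead.
 */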
2186
2187 /*
2188 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
2189 * have one.
2190 */
2191 void __weak vmalloc_sync_all(void)
2192 {
2193 }
2194
2195
2196 static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
2197 {
2198 pte_t ***p = data;
2199
2200 if (p) {
2201 *(*p) = pte;
2202 (*p)++;
2203 }
2204 return 0;
2205 }
2206
2207 /**
2208 * alloc_vm_area - allocate a range of kernel address space
2209 * @size: size of the area
2210 * @ptes: returns the PTEs for the address space
2211 *
2212 * Returns: NULL on failure, vm_struct on success
2213 *
2214 * This function reserves a range of kernel address space, and
2215 * allocates pagetables to map that range. No actual mappings
2216 * are created.
2217 *
2218 * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
2219 * allocated for the VM area are returned.
2220 */
2221 struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
2222 {
2223 struct vm_struct *area;
2224
2225 area = get_vm_area_caller(size, VM_IOREMAP,
2226 __builtin_return_address(0));
2227 if (area == NULL)
2228 return NULL;
2229
2230 /*
2231 * This ensures that page tables are constructed for this region
2232 * of kernel virtual address space and mapped into init_mm.
2233 */
2234 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2235 size, f, ptes ? &ptes : NULL)) {
2236 free_vm_area(area);
2237 return NULL;
2238 }
2239
2240 return area;
2241 }
2242 EXPORT_SYMBOL_GPL(alloc_vm_area);
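
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * user (e.g. a paravirtualized backend) that needs a kernel virtual
 * range whose PTEs it will populate by other means ("N" is a made-up
 * constant):
 *
 *	pte_t *ptes[N];
 *	struct vm_struct *area;
 *
 *	area = alloc_vm_area(N * PAGE_SIZE, ptes);
 *	if (!area)
 *		return -ENOMEM;
 *	... ptes[0..N-1] now point at the PTEs backing area->addr,
 *	    ready to be filled in ...
 *	free_vm_area(area);
 */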
2243
2244 void free_vm_area(struct vm_struct *area)
2245 {
2246 struct vm_struct *ret;
2247 ret = remove_vm_area(area->addr);
2248 BUG_ON(ret != area);
2249 kfree(area);
2250 }
2251 EXPORT_SYMBOL_GPL(free_vm_area);
2252
2253 #ifdef CONFIG_SMP
2254 static struct vmap_area *node_to_va(struct rb_node *n)
2255 {
2256 return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
2257 }
2258
2259 /**
2260 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
2261 * @end: target address
2262 * @pnext: out arg for the next vmap_area
2263 * @pprev: out arg for the previous vmap_area
2264 *
2265 * Returns: %true if either or both of next and prev are found,
2266 * %false if no vmap_area exists
2267 *
2268 * Find the vmap_areas whose end addresses enclose @end, i.e. if not
2269 * NULL, (*pnext)->va_end > @end and (*pprev)->va_end <= @end.
2270 */
2271 static bool pvm_find_next_prev(unsigned long end,
2272 struct vmap_area **pnext,
2273 struct vmap_area **pprev)
2274 {
2275 struct rb_node *n = vmap_area_root.rb_node;
2276 struct vmap_area *va = NULL;
2277
2278 while (n) {
2279 va = rb_entry(n, struct vmap_area, rb_node);
2280 if (end < va->va_end)
2281 n = n->rb_left;
2282 else if (end > va->va_end)
2283 n = n->rb_right;
2284 else
2285 break;
2286 }
2287
2288 if (!va)
2289 return false;
2290
2291 if (va->va_end > end) {
2292 *pnext = va;
2293 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2294 } else {
2295 *pprev = va;
2296 *pnext = node_to_va(rb_next(&(*pprev)->rb_node));
2297 }
2298 return true;
2299 }
2300
2301 /**
2302 * pvm_determine_end - find the highest aligned address between two vmap_areas
2303 * @pnext: in/out arg for the next vmap_area
2304 * @pprev: in/out arg for the previous vmap_area
2305 * @align: alignment
2306 *
2307 * Returns: determined end address
2308 *
2309 * Find the highest aligned address between *@pnext and *@pprev below
2310 * VMALLOC_END. *@pnext and *@pprev are adjusted so that the aligned
2311 * down address is between the end addresses of the two vmap_areas.
2312 *
2313 * Please note that the address returned by this function may fall
2314 * inside *@pnext vmap_area. The caller is responsible for checking
2315 * that.
2316 */
2317 static unsigned long pvm_determine_end(struct vmap_area **pnext,
2318 struct vmap_area **pprev,
2319 unsigned long align)
2320 {
2321 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2322 unsigned long addr;
2323
2324 if (*pnext)
2325 addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
2326 else
2327 addr = vmalloc_end;
2328
2329 while (*pprev && (*pprev)->va_end > addr) {
2330 *pnext = *pprev;
2331 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2332 }
2333
2334 return addr;
2335 }
2336
2337 /**
2338 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
2339 * @offsets: array containing offset of each area
2340 * @sizes: array containing size of each area
2341 * @nr_vms: the number of areas to allocate
2342 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
2343 *
2344 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
2345 * vm_structs on success, %NULL on failure
2346 *
2347 * Percpu allocator wants to use congruent vm areas so that it can
2348 * maintain the offsets among percpu areas. This function allocates
2349 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
2350 * be scattered pretty far apart, with the distance between two areas
2351 * easily going up to gigabytes. To avoid interacting with regular
2352 * vmallocs, these areas are allocated from the top.
2353 *
2354 * Despite its complicated look, this allocator is rather simple. It
2355 * does everything top-down and scans areas from the end looking for
2356 * a matching slot. While scanning, if any of the areas overlaps with an
2357 * existing vmap_area, the base address is pulled down to fit the
2358 * area. Scanning is repeated until all the areas fit, and then all
2359 * necessary data structures are inserted and the result is returned.
2360 */
2361 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
2362 const size_t *sizes, int nr_vms,
2363 size_t align)
2364 {
2365 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
2366 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2367 struct vmap_area **vas, *prev, *next;
2368 struct vm_struct **vms;
2369 int area, area2, last_area, term_area;
2370 unsigned long base, start, end, last_end;
2371 bool purged = false;
2372
2373 /* verify parameters and allocate data structures */
2374 BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
2375 for (last_area = 0, area = 0; area < nr_vms; area++) {
2376 start = offsets[area];
2377 end = start + sizes[area];
2378
2379 /* is everything aligned properly? */
2380 BUG_ON(!IS_ALIGNED(offsets[area], align));
2381 BUG_ON(!IS_ALIGNED(sizes[area], align));
2382
2383 /* detect the area with the highest address */
2384 if (start > offsets[last_area])
2385 last_area = area;
2386
2387 for (area2 = 0; area2 < nr_vms; area2++) {
2388 unsigned long start2 = offsets[area2];
2389 unsigned long end2 = start2 + sizes[area2];
2390
2391 if (area2 == area)
2392 continue;
2393
2394 BUG_ON(start2 >= start && start2 < end);
2395 BUG_ON(end2 <= end && end2 > start);
2396 }
2397 }
2398 last_end = offsets[last_area] + sizes[last_area];
2399
2400 if (vmalloc_end - vmalloc_start < last_end) {
2401 WARN_ON(true);
2402 return NULL;
2403 }
2404
2405 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
2406 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
2407 if (!vas || !vms)
2408 goto err_free2;
2409
2410 for (area = 0; area < nr_vms; area++) {
2411 vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
2412 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
2413 if (!vas[area] || !vms[area])
2414 goto err_free;
2415 }
2416 retry:
2417 spin_lock(&vmap_area_lock);
2418
2419 /* start scanning - we scan from the top, begin with the last area */
2420 area = term_area = last_area;
2421 start = offsets[area];
2422 end = start + sizes[area];
2423
2424 if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
2425 base = vmalloc_end - last_end;
2426 goto found;
2427 }
2428 base = pvm_determine_end(&next, &prev, align) - end;
2429
2430 while (true) {
2431 BUG_ON(next && next->va_end <= base + end);
2432 BUG_ON(prev && prev->va_end > base + end);
2433
2434 /*
2435 * base might have underflowed, add last_end before
2436 * comparing.
2437 */
2438 if (base + last_end < vmalloc_start + last_end) {
2439 spin_unlock(&vmap_area_lock);
2440 if (!purged) {
2441 purge_vmap_area_lazy();
2442 purged = true;
2443 goto retry;
2444 }
2445 goto err_free;
2446 }
2447
2448 /*
2449 * If next overlaps, move base downwards so that it's
2450 * right below next and then recheck.
2451 */
2452 if (next && next->va_start < base + end) {
2453 base = pvm_determine_end(&next, &prev, align) - end;
2454 term_area = area;
2455 continue;
2456 }
2457
2458 /*
2459 * If prev overlaps, shift down next and prev and move
2460 * base so that it's right below new next and then
2461 * recheck.
2462 */
2463 if (prev && prev->va_end > base + start) {
2464 next = prev;
2465 prev = node_to_va(rb_prev(&next->rb_node));
2466 base = pvm_determine_end(&next, &prev, align) - end;
2467 term_area = area;
2468 continue;
2469 }
2470
2471 /*
2472 * This area fits, move on to the previous one. If
2473 * the previous one is the terminal one, we're done.
2474 */
2475 area = (area + nr_vms - 1) % nr_vms;
2476 if (area == term_area)
2477 break;
2478 start = offsets[area];
2479 end = start + sizes[area];
2480 pvm_find_next_prev(base + end, &next, &prev);
2481 }
2482 found:
2483 /* we've found a fitting base, insert all va's */
2484 for (area = 0; area < nr_vms; area++) {
2485 struct vmap_area *va = vas[area];
2486
2487 va->va_start = base + offsets[area];
2488 va->va_end = va->va_start + sizes[area];
2489 __insert_vmap_area(va);
2490 }
2491
2492 vmap_area_pcpu_hole = base + offsets[last_area];
2493
2494 spin_unlock(&vmap_area_lock);
2495
2496 /* insert all vm's */
2497 for (area = 0; area < nr_vms; area++)
2498 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
2499 pcpu_get_vm_areas);
2500
2501 kfree(vas);
2502 return vms;
2503
2504 err_free:
2505 for (area = 0; area < nr_vms; area++) {
2506 kfree(vas[area]);
2507 kfree(vms[area]);
2508 }
2509 err_free2:
2510 kfree(vas);
2511 kfree(vms);
2512 return NULL;
2513 }
2514
2515 /**
2516 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
2517 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
2518 * @nr_vms: the number of allocated areas
2519 *
2520 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
2521 */
2522 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
2523 {
2524 int i;
2525
2526 for (i = 0; i < nr_vms; i++)
2527 free_vm_area(vms[i]);
2528 kfree(vms);
2529 }
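
/*
 * Illustrative sketch, not part of the original file: a percpu-style
 * user of the two helpers above would pair them roughly like this
 * ("offsets", "sizes", "nr_grps" and "atom_size" are made-up names):
 *
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, nr_grps, atom_size);
 *	if (!vms)
 *		return -ENOMEM;
 *	... map percpu pages at vms[i]->addr plus the in-area offsets ...
 *	pcpu_free_vm_areas(vms, nr_grps);
 */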
2530 #endif /* CONFIG_SMP */
2531
2532 #ifdef CONFIG_PROC_FS
2533 static void *s_start(struct seq_file *m, loff_t *pos)
2534 __acquires(&vmap_area_lock)
2535 {
2536 loff_t n = *pos;
2537 struct vmap_area *va;
2538
2539 spin_lock(&vmap_area_lock);
2540 va = list_entry((&vmap_area_list)->next, typeof(*va), list);
2541 while (n > 0 && &va->list != &vmap_area_list) {
2542 n--;
2543 va = list_entry(va->list.next, typeof(*va), list);
2544 }
2545 if (!n && &va->list != &vmap_area_list)
2546 return va;
2547
2548 return NULL;
2549
2550 }
2551
2552 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
2553 {
2554 struct vmap_area *va = p, *next;
2555
2556 ++*pos;
2557 next = list_entry(va->list.next, typeof(*va), list);
2558 if (&next->list != &vmap_area_list)
2559 return next;
2560
2561 return NULL;
2562 }
2563
2564 static void s_stop(struct seq_file *m, void *p)
2565 __releases(&vmap_area_lock)
2566 {
2567 spin_unlock(&vmap_area_lock);
2568 }
2569
2570 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
2571 {
2572 if (IS_ENABLED(CONFIG_NUMA)) {
2573 unsigned int nr, *counters = m->private;
2574
2575 if (!counters)
2576 return;
2577
2578 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
2579 smp_rmb();
2580 if (v->flags & VM_UNINITIALIZED)
2581 return;
2582
2583 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
2584
2585 for (nr = 0; nr < v->nr_pages; nr++)
2586 counters[page_to_nid(v->pages[nr])]++;
2587
2588 for_each_node_state(nr, N_HIGH_MEMORY)
2589 if (counters[nr])
2590 seq_printf(m, " N%u=%u", nr, counters[nr]);
2591 }
2592 }
2593
2594 static int s_show(struct seq_file *m, void *p)
2595 {
2596 struct vmap_area *va = p;
2597 struct vm_struct *v;
2598
2599 /*
2600 * s_show can race with remove_vm_area(): !VM_VM_AREA means the vmap
2601 * area is being torn down or belongs to a vm_map_ram allocation.
2602 */
2603 if (!(va->flags & VM_VM_AREA))
2604 return 0;
2605
2606 v = va->vm;
2607
2608 seq_printf(m, "0x%pK-0x%pK %7ld",
2609 v->addr, v->addr + v->size, v->size);
2610
2611 if (v->caller)
2612 seq_printf(m, " %pS", v->caller);
2613
2614 if (v->nr_pages)
2615 seq_printf(m, " pages=%d", v->nr_pages);
2616
2617 if (v->phys_addr)
2618 seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);
2619
2620 if (v->flags & VM_IOREMAP)
2621 seq_puts(m, " ioremap");
2622
2623 if (v->flags & VM_ALLOC)
2624 seq_puts(m, " vmalloc");
2625
2626 if (v->flags & VM_MAP)
2627 seq_puts(m, " vmap");
2628
2629 if (v->flags & VM_USERMAP)
2630 seq_puts(m, " user");
2631
2632 if (v->flags & VM_VPAGES)
2633 seq_puts(m, " vpages");
2634
2635 show_numa_info(m, v);
2636 seq_putc(m, '\n');
2637 return 0;
2638 }
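
/*
 * For reference (values below are made up, illustrative only): a line
 * produced by s_show() in /proc/vmallocinfo looks roughly like
 *
 *	0xffffc90000d00000-0xffffc90000d02000    8192 alloc_large_system_hash+0x171/0x239 pages=1 vmalloc N0=1
 *
 * i.e. the area's virtual range and size, the caller recorded at
 * allocation time, the page count, the flag names printed above and,
 * with CONFIG_NUMA, the per-node page counts.
 */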
2639
2640 static const struct seq_operations vmalloc_op = {
2641 .start = s_start,
2642 .next = s_next,
2643 .stop = s_stop,
2644 .show = s_show,
2645 };
2646
2647 static int vmalloc_open(struct inode *inode, struct file *file)
2648 {
2649 if (IS_ENABLED(CONFIG_NUMA))
2650 return seq_open_private(file, &vmalloc_op,
2651 nr_node_ids * sizeof(unsigned int));
2652 else
2653 return seq_open(file, &vmalloc_op);
2654 }
2655
2656 static const struct file_operations proc_vmalloc_operations = {
2657 .open = vmalloc_open,
2658 .read = seq_read,
2659 .llseek = seq_lseek,
2660 .release = seq_release_private,
2661 };
2662
2663 static int __init proc_vmalloc_init(void)
2664 {
2665 proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
2666 return 0;
2667 }
2668 module_init(proc_vmalloc_init);
2669
2670 void get_vmalloc_info(struct vmalloc_info *vmi)
2671 {
2672 struct vmap_area *va;
2673 unsigned long free_area_size;
2674 unsigned long prev_end;
2675
2676 vmi->used = 0;
2677 vmi->largest_chunk = 0;
2678
2679 prev_end = VMALLOC_START;
2680
2681 rcu_read_lock();
2682
2683 if (list_empty(&vmap_area_list)) {
2684 vmi->largest_chunk = VMALLOC_TOTAL;
2685 goto out;
2686 }
2687
2688 list_for_each_entry_rcu(va, &vmap_area_list, list) {
2689 unsigned long addr = va->va_start;
2690
2691 /*
2692 * Some archs keep another range for modules in vmalloc space
2693 */
2694 if (addr < VMALLOC_START)
2695 continue;
2696 if (addr >= VMALLOC_END)
2697 break;
2698
2699 if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
2700 continue;
2701
2702 vmi->used += (va->va_end - va->va_start);
2703
2704 free_area_size = addr - prev_end;
2705 if (vmi->largest_chunk < free_area_size)
2706 vmi->largest_chunk = free_area_size;
2707
2708 prev_end = va->va_end;
2709 }
2710
2711 if (VMALLOC_END - prev_end > vmi->largest_chunk)
2712 vmi->largest_chunk = VMALLOC_END - prev_end;
2713
2714 out:
2715 rcu_read_unlock();
2716 }
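
/*
 * Illustrative note, not part of the original file: in this kernel era
 * fs/proc/meminfo.c uses get_vmalloc_info() to fill in the
 * VmallocUsed/VmallocChunk lines, e.g. (values are made up):
 *
 *	VmallocTotal:   34359738367 kB
 *	VmallocUsed:         176564 kB
 *	VmallocChunk:   34341370344 kB
 */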
2717 #endif
2718
2719