/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
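/*
 * For illustration only (a sketch of the idea, not code from this
 * file): with the memmap virtually contiguous at a fixed base
 * "vmemmap", the pfn conversions reduce to plain pointer arithmetic,
 * roughly:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */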
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __init_refok __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return __alloc_bootmem_node(NODE_DATA(node), size, align, goal);
}


void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		struct page *page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO, get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

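/*
 * Warn when the pte's backing page came from a node farther away than
 * LOCAL_DISTANCE, i.e. the struct pages for this range may live
 * off-node.
 */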
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		printk(KERN_WARNING "[%lx-%lx] potential offnode "
			"page_structs\n", start, end - 1);
}

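/*
 * Fill the leaf level: if the pte is empty, back it with a freshly
 * allocated, zeroed page mapped with kernel permissions. An already
 * populated pte is returned unchanged.
 */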
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

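/*
 * The pmd/pud/pgd variants below share one pattern: if the entry at
 * this level is empty, allocate a page for the next-lower page table
 * and hook it in; an existing entry is left untouched.
 */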
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

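/*
 * Map the virtual memmap range covering "size" struct pages starting
 * at start_page, one base page at a time, walking the page table
 * top-down (pgd, pud, pmd, pte) and instantiating any missing levels
 * along the way.
 */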
int __meminit vmemmap_populate_basepages(struct page *start_page,
						unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}

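/*
 * vmemmap_populate() below is arch-provided (see the header comment).
 * As a minimal sketch, assuming an architecture where base pages
 * suffice, it could simply forward to the helper above:
 *
 *	int __meminit vmemmap_populate(struct page *start_page,
 *					unsigned long size, int node)
 *	{
 *		return vmemmap_populate_basepages(start_page, size, node);
 *	}
 */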
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
	int error = vmemmap_populate(map, PAGES_PER_SECTION, nid);
	if (error)
		return NULL;

	return map;
}