1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/arch/x86_64/mm/init.c
4 *
5 * Copyright (C) 1995 Linus Torvalds
6 * Copyright (C) 2000 Pavel Machek <pavel@ucw.cz>
7 * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
8 */
9
10 #include <linux/signal.h>
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/errno.h>
14 #include <linux/string.h>
15 #include <linux/types.h>
16 #include <linux/ptrace.h>
17 #include <linux/mman.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/smp.h>
21 #include <linux/init.h>
22 #include <linux/initrd.h>
23 #include <linux/pagemap.h>
24 #include <linux/memblock.h>
25 #include <linux/proc_fs.h>
26 #include <linux/pci.h>
27 #include <linux/pfn.h>
28 #include <linux/poison.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/memory.h>
31 #include <linux/memory_hotplug.h>
32 #include <linux/memremap.h>
33 #include <linux/nmi.h>
34 #include <linux/gfp.h>
35 #include <linux/kcore.h>
36 #include <linux/bootmem_info.h>
37
38 #include <asm/processor.h>
39 #include <asm/bios_ebda.h>
40 #include <linux/uaccess.h>
41 #include <asm/pgalloc.h>
42 #include <asm/dma.h>
43 #include <asm/fixmap.h>
44 #include <asm/e820/api.h>
45 #include <asm/apic.h>
46 #include <asm/tlb.h>
47 #include <asm/mmu_context.h>
48 #include <asm/proto.h>
49 #include <asm/smp.h>
50 #include <asm/sections.h>
51 #include <asm/kdebug.h>
52 #include <asm/numa.h>
53 #include <asm/set_memory.h>
54 #include <asm/init.h>
55 #include <asm/uv/uv.h>
56 #include <asm/setup.h>
57 #include <asm/ftrace.h>
58
59 #include "mm_internal.h"
60
61 #include "ident_map.c"
62
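/*
 * Helpers used while building kernel mappings: when 'init' is true the
 * *_safe() variants are used, which warn if an already-present entry would
 * be overwritten; otherwise the plain setters are used (e.g. for
 * kernel_physical_mapping_change(), which intentionally rewrites entries).
 */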
63 #define DEFINE_POPULATE(fname, type1, type2, init) \
64 static inline void fname##_init(struct mm_struct *mm, \
65 type1##_t *arg1, type2##_t *arg2, bool init) \
66 { \
67 if (init) \
68 fname##_safe(mm, arg1, arg2); \
69 else \
70 fname(mm, arg1, arg2); \
71 }
72
73 DEFINE_POPULATE(p4d_populate, p4d, pud, init)
74 DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
75 DEFINE_POPULATE(pud_populate, pud, pmd, init)
76 DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)
77
78 #define DEFINE_ENTRY(type1, type2, init) \
79 static inline void set_##type1##_init(type1##_t *arg1, \
80 type2##_t arg2, bool init) \
81 { \
82 if (init) \
83 set_##type1##_safe(arg1, arg2); \
84 else \
85 set_##type1(arg1, arg2); \
86 }
87
88 DEFINE_ENTRY(p4d, p4d, init)
89 DEFINE_ENTRY(pud, pud, init)
90 DEFINE_ENTRY(pmd, pmd, init)
91 DEFINE_ENTRY(pte, pte, init)
92
93 static inline pgprot_t prot_sethuge(pgprot_t prot)
94 {
95 WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PAT);
96
97 return __pgprot(pgprot_val(prot) | _PAGE_PSE);
98 }
99
100 /*
101 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
102 * physical space, so we can cache the location of the first one and move
103 * around without checking the pgd every time.
104 */
105
106 /* Bits supported by the hardware: */
107 pteval_t __supported_pte_mask __read_mostly = ~0;
108 /* Bits allowed in normal kernel mappings: */
109 pteval_t __default_kernel_pte_mask __read_mostly = ~0;
110 EXPORT_SYMBOL_GPL(__supported_pte_mask);
111 /* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
112 EXPORT_SYMBOL(__default_kernel_pte_mask);
113
114 int force_personality32;
115
116 /*
117 * noexec32=on|off
118 * Control non-executable heap for 32-bit processes.
119 *
120 * on PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
121 * off PROT_READ implies PROT_EXEC
122 */
123 static int __init nonx32_setup(char *str)
124 {
125 if (!strcmp(str, "on"))
126 force_personality32 &= ~READ_IMPLIES_EXEC;
127 else if (!strcmp(str, "off"))
128 force_personality32 |= READ_IMPLIES_EXEC;
129 return 1;
130 }
131 __setup("noexec32=", nonx32_setup);
132
133 static void sync_global_pgds_l5(unsigned long start, unsigned long end)
134 {
135 unsigned long addr;
136
137 for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
138 const pgd_t *pgd_ref = pgd_offset_k(addr);
139 struct page *page;
140
141 /* Check for overflow */
142 if (addr < start)
143 break;
144
145 if (pgd_none(*pgd_ref))
146 continue;
147
148 spin_lock(&pgd_lock);
149 list_for_each_entry(page, &pgd_list, lru) {
150 pgd_t *pgd;
151 spinlock_t *pgt_lock;
152
153 pgd = (pgd_t *)page_address(page) + pgd_index(addr);
154 /* the pgt_lock only for Xen */
155 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
156 spin_lock(pgt_lock);
157
158 if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
159 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
160
161 if (pgd_none(*pgd))
162 set_pgd(pgd, *pgd_ref);
163
164 spin_unlock(pgt_lock);
165 }
166 spin_unlock(&pgd_lock);
167 }
168 }
169
170 static void sync_global_pgds_l4(unsigned long start, unsigned long end)
171 {
172 unsigned long addr;
173
174 for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
175 pgd_t *pgd_ref = pgd_offset_k(addr);
176 const p4d_t *p4d_ref;
177 struct page *page;
178
179 /*
180 * With folded p4d, pgd_none() is always false, so we need to
181 * handle synchronization on p4d level.
182 */
183 MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
184 p4d_ref = p4d_offset(pgd_ref, addr);
185
186 if (p4d_none(*p4d_ref))
187 continue;
188
189 spin_lock(&pgd_lock);
190 list_for_each_entry(page, &pgd_list, lru) {
191 pgd_t *pgd;
192 p4d_t *p4d;
193 spinlock_t *pgt_lock;
194
195 pgd = (pgd_t *)page_address(page) + pgd_index(addr);
196 p4d = p4d_offset(pgd, addr);
197 /* the pgt_lock only for Xen */
198 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
199 spin_lock(pgt_lock);
200
201 if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
202 BUG_ON(p4d_pgtable(*p4d)
203 != p4d_pgtable(*p4d_ref));
204
205 if (p4d_none(*p4d))
206 set_p4d(p4d, *p4d_ref);
207
208 spin_unlock(pgt_lock);
209 }
210 spin_unlock(&pgd_lock);
211 }
212 }
213
214 /*
215 * When memory is added, make sure all processes' MMs have
216 * suitable PGD entries in the local PGD level page.
217 */
218 static void sync_global_pgds(unsigned long start, unsigned long end)
219 {
220 if (pgtable_l5_enabled())
221 sync_global_pgds_l5(start, end);
222 else
223 sync_global_pgds_l4(start, end);
224 }
225
226 /*
227 * Make kernel mappings visible in all page tables in the system.
228 * This is necessary except when the init task populates kernel mappings
229 * during the boot process. In that case, all processes originating from
230 * the init task copy the kernel mappings, so there is no issue.
231 * Otherwise, missing synchronization could lead to kernel crashes due
232 * to missing page table entries for certain kernel mappings.
233 *
234 * Synchronization is performed at the top level, which is the PGD in
235 * 5-level paging systems. In 4-level paging systems, however,
236 * pgd_populate() is a no-op, so synchronization is done at the P4D level.
237 * sync_global_pgds() handles this difference between paging levels.
238 */
239 void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
240 {
241 sync_global_pgds(start, end);
242 }
243
244 /*
245 * NOTE: This function is marked __ref because it calls an init-time
246 * allocator (memblock_alloc()). It's safe to do so ONLY when after_bootmem == 0.
247 */
248 static __ref void *spp_getpage(void)
249 {
250 void *ptr;
251
252 if (after_bootmem)
253 ptr = (void *) get_zeroed_page(GFP_ATOMIC);
254 else
255 ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
256
257 if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
258 panic("set_pte_phys: cannot allocate page data %s\n",
259 after_bootmem ? "after bootmem" : "");
260 }
261
262 pr_debug("spp_getpage %p\n", ptr);
263
264 return ptr;
265 }
266
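/*
 * The fill_*() helpers below walk one level of the kernel page tables for
 * 'vaddr', allocating the next-level table via spp_getpage() if the entry
 * is empty, and return a pointer to the entry for 'vaddr' at that level.
 */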
267 static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
268 {
269 if (pgd_none(*pgd)) {
270 p4d_t *p4d = (p4d_t *)spp_getpage();
271 pgd_populate(&init_mm, pgd, p4d);
272 if (p4d != p4d_offset(pgd, 0))
273 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
274 p4d, p4d_offset(pgd, 0));
275 }
276 return p4d_offset(pgd, vaddr);
277 }
278
279 static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
280 {
281 if (p4d_none(*p4d)) {
282 pud_t *pud = (pud_t *)spp_getpage();
283 p4d_populate(&init_mm, p4d, pud);
284 if (pud != pud_offset(p4d, 0))
285 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
286 pud, pud_offset(p4d, 0));
287 }
288 return pud_offset(p4d, vaddr);
289 }
290
291 static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
292 {
293 if (pud_none(*pud)) {
294 pmd_t *pmd = (pmd_t *) spp_getpage();
295 pud_populate(&init_mm, pud, pmd);
296 if (pmd != pmd_offset(pud, 0))
297 printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
298 pmd, pmd_offset(pud, 0));
299 }
300 return pmd_offset(pud, vaddr);
301 }
302
303 static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
304 {
305 if (pmd_none(*pmd)) {
306 pte_t *pte = (pte_t *) spp_getpage();
307 pmd_populate_kernel(&init_mm, pmd, pte);
308 if (pte != pte_offset_kernel(pmd, 0))
309 printk(KERN_ERR "PAGETABLE BUG #03!\n");
310 }
311 return pte_offset_kernel(pmd, vaddr);
312 }
313
314 static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
315 {
316 pmd_t *pmd = fill_pmd(pud, vaddr);
317 pte_t *pte = fill_pte(pmd, vaddr);
318
319 set_pte(pte, new_pte);
320
321 /*
322 * It's enough to flush this one mapping.
323 * (PGE mappings get flushed as well)
324 */
325 flush_tlb_one_kernel(vaddr);
326 }
327
328 void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
329 {
330 p4d_t *p4d = p4d_page + p4d_index(vaddr);
331 pud_t *pud = fill_pud(p4d, vaddr);
332
333 __set_pte_vaddr(pud, vaddr, new_pte);
334 }
335
336 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
337 {
338 pud_t *pud = pud_page + pud_index(vaddr);
339
340 __set_pte_vaddr(pud, vaddr, new_pte);
341 }
342
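/*
 * Install a single kernel PTE at 'vaddr', allocating intermediate page
 * table levels as needed (used e.g. by the fixmap code via set_fixmap()).
 */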
343 void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
344 {
345 pgd_t *pgd;
346 p4d_t *p4d_page;
347
348 pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));
349
350 pgd = pgd_offset_k(vaddr);
351 if (pgd_none(*pgd)) {
352 printk(KERN_ERR
353 "PGD FIXMAP MISSING, it should be setup in head.S!\n");
354 return;
355 }
356
357 p4d_page = p4d_offset(pgd, 0);
358 set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
359 }
360
361 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
362 {
363 pgd_t *pgd;
364 p4d_t *p4d;
365 pud_t *pud;
366
367 pgd = pgd_offset_k(vaddr);
368 p4d = fill_p4d(pgd, vaddr);
369 pud = fill_pud(p4d, vaddr);
370 return fill_pmd(pud, vaddr);
371 }
372
373 pte_t * __init populate_extra_pte(unsigned long vaddr)
374 {
375 pmd_t *pmd;
376
377 pmd = populate_extra_pmd(vaddr);
378 return fill_pte(pmd, vaddr);
379 }
380
381 /*
382 * Create large page table mappings for a range of physical addresses.
383 */
384 static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
385 enum page_cache_mode cache)
386 {
387 pgd_t *pgd;
388 p4d_t *p4d;
389 pud_t *pud;
390 pmd_t *pmd;
391 pgprot_t prot;
392
393 pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
394 protval_4k_2_large(cachemode2protval(cache));
395 BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
396 for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
397 pgd = pgd_offset_k((unsigned long)__va(phys));
398 if (pgd_none(*pgd)) {
399 p4d = (p4d_t *) spp_getpage();
400 set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
401 _PAGE_USER));
402 }
403 p4d = p4d_offset(pgd, (unsigned long)__va(phys));
404 if (p4d_none(*p4d)) {
405 pud = (pud_t *) spp_getpage();
406 set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
407 _PAGE_USER));
408 }
409 pud = pud_offset(p4d, (unsigned long)__va(phys));
410 if (pud_none(*pud)) {
411 pmd = (pmd_t *) spp_getpage();
412 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
413 _PAGE_USER));
414 }
415 pmd = pmd_offset(pud, phys);
416 BUG_ON(!pmd_none(*pmd));
417 set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
418 }
419 }
420
421 void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
422 {
423 __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
424 }
425
426 void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
427 {
428 __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
429 }
430
431 /*
432 * The head.S code sets up the kernel high mapping:
433 *
434 * from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
435 *
436 * phys_base holds the negative offset to the kernel, which is added
437 * to the compile time generated pmds. This results in invalid pmds up
438 * to the point where we hit the physaddr 0 mapping.
439 *
440 * We limit the mappings to the region from _text to _brk_end. _brk_end
441 * is rounded up to the 2MB boundary. This catches the invalid pmds as
442 * well, as they are located before _text:
443 */
444 void __init cleanup_highmap(void)
445 {
446 unsigned long vaddr = __START_KERNEL_map;
447 unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
448 unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
449 pmd_t *pmd = level2_kernel_pgt;
450
451 /*
452 * Native path, max_pfn_mapped is not set yet.
453 * Xen has valid max_pfn_mapped set in
454 * arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
455 */
456 if (max_pfn_mapped)
457 vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
458
459 for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
460 if (pmd_none(*pmd))
461 continue;
462 if (vaddr < (unsigned long) _text || vaddr > end)
463 set_pmd(pmd, __pmd(0));
464 }
465 }
466
467 /*
468 * Create PTE level page table mapping for physical addresses.
469 * It returns the last physical address mapped.
470 */
471 static unsigned long __meminit
472 phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
473 pgprot_t prot, bool init)
474 {
475 unsigned long pages = 0, paddr_next;
476 unsigned long paddr_last = paddr_end;
477 pte_t *pte;
478 int i;
479
480 pte = pte_page + pte_index(paddr);
481 i = pte_index(paddr);
482
483 for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
484 paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
485 if (paddr >= paddr_end) {
486 if (!after_bootmem &&
487 !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
488 E820_TYPE_RAM) &&
489 !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
490 E820_TYPE_RESERVED_KERN) &&
491 !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
492 E820_TYPE_ACPI))
493 set_pte_init(pte, __pte(0), init);
494 continue;
495 }
496
497 /*
498 * We will re-use the existing mapping.
499 * Xen for example has some special requirements, like mapping
500 * pagetable pages as RO. So assume whoever pre-set up
501 * these mappings knew what they were doing.
502 */
503 if (!pte_none(*pte)) {
504 if (!after_bootmem)
505 pages++;
506 continue;
507 }
508
509 if (0)
510 pr_info(" pte=%p addr=%lx pte=%016lx\n", pte, paddr,
511 pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
512 pages++;
513 set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init);
514 paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
515 }
516
517 update_page_count(PG_LEVEL_4K, pages);
518
519 return paddr_last;
520 }
521
522 /*
523 * Create PMD level page table mapping for physical addresses. The virtual
524 * and physical addresses have to be aligned at this level.
525 * It returns the last physical address mapped.
526 */
527 static unsigned long __meminit
528 phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
529 unsigned long page_size_mask, pgprot_t prot, bool init)
530 {
531 unsigned long pages = 0, paddr_next;
532 unsigned long paddr_last = paddr_end;
533
534 int i = pmd_index(paddr);
535
536 for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
537 pmd_t *pmd = pmd_page + pmd_index(paddr);
538 pte_t *pte;
539 pgprot_t new_prot = prot;
540
541 paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
542 if (paddr >= paddr_end) {
543 if (!after_bootmem &&
544 !e820__mapped_any(paddr & PMD_MASK, paddr_next,
545 E820_TYPE_RAM) &&
546 !e820__mapped_any(paddr & PMD_MASK, paddr_next,
547 E820_TYPE_RESERVED_KERN) &&
548 !e820__mapped_any(paddr & PMD_MASK, paddr_next,
549 E820_TYPE_ACPI))
550 set_pmd_init(pmd, __pmd(0), init);
551 continue;
552 }
553
554 if (!pmd_none(*pmd)) {
555 if (!pmd_leaf(*pmd)) {
556 spin_lock(&init_mm.page_table_lock);
557 pte = (pte_t *)pmd_page_vaddr(*pmd);
558 paddr_last = phys_pte_init(pte, paddr,
559 paddr_end, prot,
560 init);
561 spin_unlock(&init_mm.page_table_lock);
562 continue;
563 }
564 /*
565 * If we are ok with PG_LEVEL_2M mapping, then we will
566 * use the existing mapping.
567 *
568 * Otherwise, we will split the large page mapping but
569 * use the same existing protection bits except for
570 * large page, so that we don't violate Intel's TLB
571 * Application note (317080) which says, while changing
572 * the page sizes, new and old translations should
573 * not differ with respect to page frame and
574 * attributes.
575 */
576 if (page_size_mask & (1 << PG_LEVEL_2M)) {
577 if (!after_bootmem)
578 pages++;
579 paddr_last = paddr_next;
580 continue;
581 }
582 new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
583 }
584
585 if (page_size_mask & (1<<PG_LEVEL_2M)) {
586 pages++;
587 spin_lock(&init_mm.page_table_lock);
588 set_pmd_init(pmd,
589 pfn_pmd(paddr >> PAGE_SHIFT, prot_sethuge(prot)),
590 init);
591 spin_unlock(&init_mm.page_table_lock);
592 paddr_last = paddr_next;
593 continue;
594 }
595
596 pte = alloc_low_page();
597 paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);
598
599 spin_lock(&init_mm.page_table_lock);
600 pmd_populate_kernel_init(&init_mm, pmd, pte, init);
601 spin_unlock(&init_mm.page_table_lock);
602 }
603 update_page_count(PG_LEVEL_2M, pages);
604 return paddr_last;
605 }
606
607 /*
608 * Create PUD level page table mapping for physical addresses. The virtual
609 * and physical addresses do not have to be aligned at this level. KASLR can
610 * randomize virtual addresses up to this level.
611 * It returns the last physical address mapped.
612 */
613 static unsigned long __meminit
614 phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
615 unsigned long page_size_mask, pgprot_t _prot, bool init)
616 {
617 unsigned long pages = 0, paddr_next;
618 unsigned long paddr_last = paddr_end;
619 unsigned long vaddr = (unsigned long)__va(paddr);
620 int i = pud_index(vaddr);
621
622 for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
623 pud_t *pud;
624 pmd_t *pmd;
625 pgprot_t prot = _prot;
626
627 vaddr = (unsigned long)__va(paddr);
628 pud = pud_page + pud_index(vaddr);
629 paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
630
631 if (paddr >= paddr_end) {
632 if (!after_bootmem &&
633 !e820__mapped_any(paddr & PUD_MASK, paddr_next,
634 E820_TYPE_RAM) &&
635 !e820__mapped_any(paddr & PUD_MASK, paddr_next,
636 E820_TYPE_RESERVED_KERN) &&
637 !e820__mapped_any(paddr & PUD_MASK, paddr_next,
638 E820_TYPE_ACPI))
639 set_pud_init(pud, __pud(0), init);
640 continue;
641 }
642
643 if (!pud_none(*pud)) {
644 if (!pud_leaf(*pud)) {
645 pmd = pmd_offset(pud, 0);
646 paddr_last = phys_pmd_init(pmd, paddr,
647 paddr_end,
648 page_size_mask,
649 prot, init);
650 continue;
651 }
652 /*
653 * If we are ok with PG_LEVEL_1G mapping, then we will
654 * use the existing mapping.
655 *
656 * Otherwise, we will split the gbpage mapping but use
657 * the same existing protection bits except for large
658 * page, so that we don't violate Intel's TLB
659 * Application note (317080) which says, while changing
660 * the page sizes, new and old translations should
661 * not differ with respect to page frame and
662 * attributes.
663 */
664 if (page_size_mask & (1 << PG_LEVEL_1G)) {
665 if (!after_bootmem)
666 pages++;
667 paddr_last = paddr_next;
668 continue;
669 }
670 prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
671 }
672
673 if (page_size_mask & (1<<PG_LEVEL_1G)) {
674 pages++;
675 spin_lock(&init_mm.page_table_lock);
676 set_pud_init(pud,
677 pfn_pud(paddr >> PAGE_SHIFT, prot_sethuge(prot)),
678 init);
679 spin_unlock(&init_mm.page_table_lock);
680 paddr_last = paddr_next;
681 continue;
682 }
683
684 pmd = alloc_low_page();
685 paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
686 page_size_mask, prot, init);
687
688 spin_lock(&init_mm.page_table_lock);
689 pud_populate_init(&init_mm, pud, pmd, init);
690 spin_unlock(&init_mm.page_table_lock);
691 }
692
693 update_page_count(PG_LEVEL_1G, pages);
694
695 return paddr_last;
696 }
697
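/*
 * Create P4D level page table mappings for physical addresses. With 4-level
 * paging the P4D is folded and this simply calls down to phys_pud_init().
 * It returns the last physical address mapped.
 */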
698 static unsigned long __meminit
699 phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
700 unsigned long page_size_mask, pgprot_t prot, bool init)
701 {
702 unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;
703
704 paddr_last = paddr_end;
705 vaddr = (unsigned long)__va(paddr);
706 vaddr_end = (unsigned long)__va(paddr_end);
707
708 if (!pgtable_l5_enabled())
709 return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
710 page_size_mask, prot, init);
711
712 for (; vaddr < vaddr_end; vaddr = vaddr_next) {
713 p4d_t *p4d = p4d_page + p4d_index(vaddr);
714 pud_t *pud;
715
716 vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE;
717 paddr = __pa(vaddr);
718
719 if (paddr >= paddr_end) {
720 paddr_next = __pa(vaddr_next);
721 if (!after_bootmem &&
722 !e820__mapped_any(paddr & P4D_MASK, paddr_next,
723 E820_TYPE_RAM) &&
724 !e820__mapped_any(paddr & P4D_MASK, paddr_next,
725 E820_TYPE_RESERVED_KERN) &&
726 !e820__mapped_any(paddr & P4D_MASK, paddr_next,
727 E820_TYPE_ACPI))
728 set_p4d_init(p4d, __p4d(0), init);
729 continue;
730 }
731
732 if (!p4d_none(*p4d)) {
733 pud = pud_offset(p4d, 0);
734 paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
735 page_size_mask, prot, init);
736 continue;
737 }
738
739 pud = alloc_low_page();
740 paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
741 page_size_mask, prot, init);
742
743 spin_lock(&init_mm.page_table_lock);
744 p4d_populate_init(&init_mm, p4d, pud, init);
745 spin_unlock(&init_mm.page_table_lock);
746 }
747
748 return paddr_last;
749 }
750
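/*
 * Common worker for kernel_physical_mapping_init() and
 * kernel_physical_mapping_change(): maps [paddr_start, paddr_end) into the
 * kernel direct map, using the *_safe() page table setters when 'init' is
 * true. Returns the last physical address mapped.
 */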
751 static unsigned long __meminit
752 __kernel_physical_mapping_init(unsigned long paddr_start,
753 unsigned long paddr_end,
754 unsigned long page_size_mask,
755 pgprot_t prot, bool init)
756 {
757 bool pgd_changed = false;
758 unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
759
760 paddr_last = paddr_end;
761 vaddr = (unsigned long)__va(paddr_start);
762 vaddr_end = (unsigned long)__va(paddr_end);
763 vaddr_start = vaddr;
764
765 for (; vaddr < vaddr_end; vaddr = vaddr_next) {
766 pgd_t *pgd = pgd_offset_k(vaddr);
767 p4d_t *p4d;
768
769 vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
770
771 if (pgd_val(*pgd)) {
772 p4d = (p4d_t *)pgd_page_vaddr(*pgd);
773 paddr_last = phys_p4d_init(p4d, __pa(vaddr),
774 __pa(vaddr_end),
775 page_size_mask,
776 prot, init);
777 continue;
778 }
779
780 p4d = alloc_low_page();
781 paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
782 page_size_mask, prot, init);
783
784 spin_lock(&init_mm.page_table_lock);
785 if (pgtable_l5_enabled())
786 pgd_populate_init(&init_mm, pgd, p4d, init);
787 else
788 p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr),
789 (pud_t *) p4d, init);
790
791 spin_unlock(&init_mm.page_table_lock);
792 pgd_changed = true;
793 }
794
795 if (pgd_changed)
796 sync_global_pgds(vaddr_start, vaddr_end - 1);
797
798 return paddr_last;
799 }
800
801
802 /*
803 * Create page table mapping for the physical memory for specific physical
804 * addresses. Note that it can only be used to populate non-present entries.
805 * The virtual and physical addresses have to be aligned on PMD level
806 * down. It returns the last physical address mapped.
807 */
808 unsigned long __meminit
809 kernel_physical_mapping_init(unsigned long paddr_start,
810 unsigned long paddr_end,
811 unsigned long page_size_mask, pgprot_t prot)
812 {
813 return __kernel_physical_mapping_init(paddr_start, paddr_end,
814 page_size_mask, prot, true);
815 }
816
817 /*
818 * This function is similar to kernel_physical_mapping_init() above with the
819 * exception that it uses set_{pud,pmd}() instead of the set_{pud,pmd}_safe()
820 * functions when updating the mapping. The caller is responsible for flushing
821 * the TLBs after the function returns.
822 */
823 unsigned long __meminit
824 kernel_physical_mapping_change(unsigned long paddr_start,
825 unsigned long paddr_end,
826 unsigned long page_size_mask)
827 {
828 return __kernel_physical_mapping_init(paddr_start, paddr_end,
829 page_size_mask, PAGE_KERNEL,
830 false);
831 }
832
833 #ifndef CONFIG_NUMA
834 void __init initmem_init(void)
835 {
836 memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
837 }
838 #endif
839
840 void __init paging_init(void)
841 {
842 sparse_init();
843
844 /*
845 * clear the default setting with node 0
846 * note: don't use nodes_clear here, that is really clearing when
847 * numa support is not compiled in, and later node_set_state
848 * will not set it back.
849 */
850 node_clear_state(0, N_MEMORY);
851 node_clear_state(0, N_NORMAL_MEMORY);
852
853 zone_sizes_init();
854 }
855
856 #ifdef CONFIG_SPARSEMEM_VMEMMAP
857 #define PAGE_UNUSED 0xFD
858
859 /*
860 * The unused vmemmap range, which has not yet been memset to PAGE_UNUSED,
861 * ranges from unused_pmd_start to the next PMD_SIZE boundary.
862 */
863 static unsigned long unused_pmd_start __meminitdata;
864
865 static void __meminit vmemmap_flush_unused_pmd(void)
866 {
867 if (!unused_pmd_start)
868 return;
869 /*
870 * Clears (unused_pmd_start, PMD_END]
871 */
872 memset((void *)unused_pmd_start, PAGE_UNUSED,
873 ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
874 unused_pmd_start = 0;
875 }
876
877 #ifdef CONFIG_MEMORY_HOTPLUG
878 /* Returns true if the PMD is completely unused and thus it can be freed */
879 static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end)
880 {
881 unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);
882
883 /*
884 * Flush the unused range cache to ensure that memchr_inv() will work
885 * for the whole range.
886 */
887 vmemmap_flush_unused_pmd();
888 memset((void *)addr, PAGE_UNUSED, end - addr);
889
890 return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE);
891 }
892 #endif
893
894 static void __meminit __vmemmap_use_sub_pmd(unsigned long start)
895 {
896 /*
897 * As we expect to add memory in the same granularity as we remove it, it's
898 * sufficient to mark only some piece as used to block the memmap page from
899 * getting removed when removing some other adjacent memmap (just in
900 * case the first memmap never gets initialized e.g., because the memory
901 * block never gets onlined).
902 */
903 memset((void *)start, 0, sizeof(struct page));
904 }
905
906 static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
907 {
908 /*
909 * We only optimize if the new used range directly follows the
910 * previously unused range (esp., when populating consecutive sections).
911 */
912 if (unused_pmd_start == start) {
913 if (likely(IS_ALIGNED(end, PMD_SIZE)))
914 unused_pmd_start = 0;
915 else
916 unused_pmd_start = end;
917 return;
918 }
919
920 /*
921 * If the range does not contiguously follow the previous one, make sure
922 * to mark the unused range of the previous one so it can be removed.
923 */
924 vmemmap_flush_unused_pmd();
925 __vmemmap_use_sub_pmd(start);
926 }
927
928
929 static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
930 {
931 const unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
932
933 vmemmap_flush_unused_pmd();
934
935 /*
936 * Could be our memmap page is filled with PAGE_UNUSED already from a
937 * previous remove. Make sure to reset it.
938 */
939 __vmemmap_use_sub_pmd(start);
940
941 /*
942 * Mark with PAGE_UNUSED the unused parts of the new memmap range
943 */
944 if (!IS_ALIGNED(start, PMD_SIZE))
945 memset((void *)page, PAGE_UNUSED, start - page);
946
947 /*
948 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
949 * consecutive sections. Remember for the last added PMD where the
950 * unused range begins.
951 */
952 if (!IS_ALIGNED(end, PMD_SIZE))
953 unused_pmd_start = end;
954 }
955 #endif
956
957 /*
958 * Memory hotplug specific functions
959 */
960 #ifdef CONFIG_MEMORY_HOTPLUG
961 /*
962 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
963 * updating.
964 */
965 static void update_end_of_memory_vars(u64 start, u64 size)
966 {
967 unsigned long end_pfn = PFN_UP(start + size);
968
969 if (end_pfn > max_pfn) {
970 max_pfn = end_pfn;
971 max_low_pfn = end_pfn;
972 high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
973 }
974 }
975
976 int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
977 struct mhp_params *params)
978 {
979 unsigned long end = ((start_pfn + nr_pages) << PAGE_SHIFT) - 1;
980 int ret;
981
982 if (WARN_ON_ONCE(end > PHYSMEM_END))
983 return -ERANGE;
984
985 ret = __add_pages(nid, start_pfn, nr_pages, params);
986 WARN_ON_ONCE(ret);
987
988 /*
989 * Special case: add_pages() is called by memremap_pages() for adding device
990 * private pages. Do not bump up max_pfn in the device private path,
991 * because max_pfn changes affect dma_addressing_limited().
992 *
993 * dma_addressing_limited() returning true when max_pfn exceeds the device's
994 * addressable memory can force device drivers to use bounce buffers
995 * and impact their performance negatively:
996 */
997 if (!params->pgmap)
998 /* update max_pfn, max_low_pfn and high_memory */
999 update_end_of_memory_vars(start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
1000
1001 return ret;
1002 }
1003
1004 int arch_add_memory(int nid, u64 start, u64 size,
1005 struct mhp_params *params)
1006 {
1007 unsigned long start_pfn = start >> PAGE_SHIFT;
1008 unsigned long nr_pages = size >> PAGE_SHIFT;
1009
1010 init_memory_mapping(start, start + size, params->pgprot);
1011
1012 return add_pages(nid, start_pfn, nr_pages, params);
1013 }
1014
1015 static void __meminit free_pagetable(struct page *page, int order)
1016 {
1017 unsigned long magic;
1018 unsigned int nr_pages = 1 << order;
1019
1020 /* bootmem page has reserved flag */
1021 if (PageReserved(page)) {
1022 magic = page->index;
1023 if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
1024 while (nr_pages--)
1025 put_page_bootmem(page++);
1026 } else
1027 while (nr_pages--)
1028 free_reserved_page(page++);
1029 } else
1030 free_pages((unsigned long)page_address(page), order);
1031 }
1032
1033 static void __meminit free_hugepage_table(struct page *page,
1034 struct vmem_altmap *altmap)
1035 {
1036 if (altmap)
1037 vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
1038 else
1039 free_pagetable(page, get_order(PMD_SIZE));
1040 }
1041
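/*
 * The free_{pte,pmd,pud}_table() helpers below free a lower-level page
 * table page once all of its entries are clear, and then clear the entry
 * pointing to it in the parent level.
 */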
1042 static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
1043 {
1044 pte_t *pte;
1045 int i;
1046
1047 for (i = 0; i < PTRS_PER_PTE; i++) {
1048 pte = pte_start + i;
1049 if (!pte_none(*pte))
1050 return;
1051 }
1052
1053 /* free a pte table */
1054 free_pagetable(pmd_page(*pmd), 0);
1055 spin_lock(&init_mm.page_table_lock);
1056 pmd_clear(pmd);
1057 spin_unlock(&init_mm.page_table_lock);
1058 }
1059
1060 static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
1061 {
1062 pmd_t *pmd;
1063 int i;
1064
1065 for (i = 0; i < PTRS_PER_PMD; i++) {
1066 pmd = pmd_start + i;
1067 if (!pmd_none(*pmd))
1068 return;
1069 }
1070
1071 /* free a pmd table */
1072 free_pagetable(pud_page(*pud), 0);
1073 spin_lock(&init_mm.page_table_lock);
1074 pud_clear(pud);
1075 spin_unlock(&init_mm.page_table_lock);
1076 }
1077
1078 static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
1079 {
1080 pud_t *pud;
1081 int i;
1082
1083 for (i = 0; i < PTRS_PER_PUD; i++) {
1084 pud = pud_start + i;
1085 if (!pud_none(*pud))
1086 return;
1087 }
1088
1089 /* free a pud table */
1090 free_pagetable(p4d_page(*p4d), 0);
1091 spin_lock(&init_mm.page_table_lock);
1092 p4d_clear(p4d);
1093 spin_unlock(&init_mm.page_table_lock);
1094 }
1095
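/*
 * The remove_*_table() family tears down mappings in [addr, end) one level
 * at a time. 'direct' distinguishes the kernel direct mapping (page counts
 * are updated, backing pages are not freed) from vmemmap teardown, where
 * the backing pages themselves are freed.
 */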
1096 static void __meminit
1097 remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
1098 bool direct)
1099 {
1100 unsigned long next, pages = 0;
1101 pte_t *pte;
1102 phys_addr_t phys_addr;
1103
1104 pte = pte_start + pte_index(addr);
1105 for (; addr < end; addr = next, pte++) {
1106 next = (addr + PAGE_SIZE) & PAGE_MASK;
1107 if (next > end)
1108 next = end;
1109
1110 if (!pte_present(*pte))
1111 continue;
1112
1113 /*
1114 * We mapped [0,1G) memory as identity mapping when
1115 * initializing, in arch/x86/kernel/head_64.S. These
1116 * pagetables cannot be removed.
1117 */
1118 phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
1119 if (phys_addr < (phys_addr_t)0x40000000)
1120 return;
1121
1122 if (!direct)
1123 free_pagetable(pte_page(*pte), 0);
1124
1125 spin_lock(&init_mm.page_table_lock);
1126 pte_clear(&init_mm, addr, pte);
1127 spin_unlock(&init_mm.page_table_lock);
1128
1129 /* For non-direct mapping, pages means nothing. */
1130 pages++;
1131 }
1132
1133 /* Call free_pte_table() in remove_pmd_table(). */
1134 flush_tlb_all();
1135 if (direct)
1136 update_page_count(PG_LEVEL_4K, -pages);
1137 }
1138
1139 static void __meminit
1140 remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
1141 bool direct, struct vmem_altmap *altmap)
1142 {
1143 unsigned long next, pages = 0;
1144 pte_t *pte_base;
1145 pmd_t *pmd;
1146
1147 pmd = pmd_start + pmd_index(addr);
1148 for (; addr < end; addr = next, pmd++) {
1149 next = pmd_addr_end(addr, end);
1150
1151 if (!pmd_present(*pmd))
1152 continue;
1153
1154 if (pmd_leaf(*pmd)) {
1155 if (IS_ALIGNED(addr, PMD_SIZE) &&
1156 IS_ALIGNED(next, PMD_SIZE)) {
1157 if (!direct)
1158 free_hugepage_table(pmd_page(*pmd),
1159 altmap);
1160
1161 spin_lock(&init_mm.page_table_lock);
1162 pmd_clear(pmd);
1163 spin_unlock(&init_mm.page_table_lock);
1164 pages++;
1165 }
1166 #ifdef CONFIG_SPARSEMEM_VMEMMAP
1167 else if (vmemmap_pmd_is_unused(addr, next)) {
1168 free_hugepage_table(pmd_page(*pmd),
1169 altmap);
1170 spin_lock(&init_mm.page_table_lock);
1171 pmd_clear(pmd);
1172 spin_unlock(&init_mm.page_table_lock);
1173 }
1174 #endif
1175 continue;
1176 }
1177
1178 pte_base = (pte_t *)pmd_page_vaddr(*pmd);
1179 remove_pte_table(pte_base, addr, next, direct);
1180 free_pte_table(pte_base, pmd);
1181 }
1182
1183 /* Call free_pmd_table() in remove_pud_table(). */
1184 if (direct)
1185 update_page_count(PG_LEVEL_2M, -pages);
1186 }
1187
1188 static void __meminit
1189 remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
1190 struct vmem_altmap *altmap, bool direct)
1191 {
1192 unsigned long next, pages = 0;
1193 pmd_t *pmd_base;
1194 pud_t *pud;
1195
1196 pud = pud_start + pud_index(addr);
1197 for (; addr < end; addr = next, pud++) {
1198 next = pud_addr_end(addr, end);
1199
1200 if (!pud_present(*pud))
1201 continue;
1202
1203 if (pud_leaf(*pud) &&
1204 IS_ALIGNED(addr, PUD_SIZE) &&
1205 IS_ALIGNED(next, PUD_SIZE)) {
1206 spin_lock(&init_mm.page_table_lock);
1207 pud_clear(pud);
1208 spin_unlock(&init_mm.page_table_lock);
1209 pages++;
1210 continue;
1211 }
1212
1213 pmd_base = pmd_offset(pud, 0);
1214 remove_pmd_table(pmd_base, addr, next, direct, altmap);
1215 free_pmd_table(pmd_base, pud);
1216 }
1217
1218 if (direct)
1219 update_page_count(PG_LEVEL_1G, -pages);
1220 }
1221
1222 static void __meminit
1223 remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
1224 struct vmem_altmap *altmap, bool direct)
1225 {
1226 unsigned long next, pages = 0;
1227 pud_t *pud_base;
1228 p4d_t *p4d;
1229
1230 p4d = p4d_start + p4d_index(addr);
1231 for (; addr < end; addr = next, p4d++) {
1232 next = p4d_addr_end(addr, end);
1233
1234 if (!p4d_present(*p4d))
1235 continue;
1236
1237 BUILD_BUG_ON(p4d_leaf(*p4d));
1238
1239 pud_base = pud_offset(p4d, 0);
1240 remove_pud_table(pud_base, addr, next, altmap, direct);
1241 /*
1242 * For 4-level page tables we do not want to free PUDs, but in the
1243 * 5-level case we should free them. This code will have to change
1244 * to adapt for boot-time switching between 4 and 5 level page tables.
1245 */
1246 if (pgtable_l5_enabled())
1247 free_pud_table(pud_base, p4d);
1248 }
1249
1250 if (direct)
1251 update_page_count(PG_LEVEL_512G, -pages);
1252 }
1253
1254 /* start and end are both virtual address. */
1255 static void __meminit
1256 remove_pagetable(unsigned long start, unsigned long end, bool direct,
1257 struct vmem_altmap *altmap)
1258 {
1259 unsigned long next;
1260 unsigned long addr;
1261 pgd_t *pgd;
1262 p4d_t *p4d;
1263
1264 for (addr = start; addr < end; addr = next) {
1265 next = pgd_addr_end(addr, end);
1266
1267 pgd = pgd_offset_k(addr);
1268 if (!pgd_present(*pgd))
1269 continue;
1270
1271 p4d = p4d_offset(pgd, 0);
1272 remove_p4d_table(p4d, addr, next, altmap, direct);
1273 }
1274
1275 flush_tlb_all();
1276 }
1277
1278 void __ref vmemmap_free(unsigned long start, unsigned long end,
1279 struct vmem_altmap *altmap)
1280 {
1281 VM_BUG_ON(!PAGE_ALIGNED(start));
1282 VM_BUG_ON(!PAGE_ALIGNED(end));
1283
1284 remove_pagetable(start, end, false, altmap);
1285 }
1286
1287 static void __meminit
1288 kernel_physical_mapping_remove(unsigned long start, unsigned long end)
1289 {
1290 start = (unsigned long)__va(start);
1291 end = (unsigned long)__va(end);
1292
1293 remove_pagetable(start, end, true, NULL);
1294 }
1295
1296 void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
1297 {
1298 unsigned long start_pfn = start >> PAGE_SHIFT;
1299 unsigned long nr_pages = size >> PAGE_SHIFT;
1300
1301 __remove_pages(start_pfn, nr_pages, altmap);
1302 kernel_physical_mapping_remove(start, start + size);
1303 }
1304 #endif /* CONFIG_MEMORY_HOTPLUG */
1305
1306 static struct kcore_list kcore_vsyscall;
1307
1308 static void __init register_page_bootmem_info(void)
1309 {
1310 #if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP)
1311 int i;
1312
1313 for_each_online_node(i)
1314 register_page_bootmem_info_node(NODE_DATA(i));
1315 #endif
1316 }
1317
1318 /*
1319 * Pre-allocates page-table pages for the vmalloc area in the kernel page-table.
1320 * Only the level which needs to be synchronized between all page-tables is
1321 * allocated because the synchronization can be expensive.
1322 */
1323 static void __init preallocate_vmalloc_pages(void)
1324 {
1325 unsigned long addr;
1326 const char *lvl;
1327
1328 for (addr = VMALLOC_START; addr <= VMEMORY_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
1329 pgd_t *pgd = pgd_offset_k(addr);
1330 p4d_t *p4d;
1331 pud_t *pud;
1332
1333 lvl = "p4d";
1334 p4d = p4d_alloc(&init_mm, pgd, addr);
1335 if (!p4d)
1336 goto failed;
1337
1338 if (pgtable_l5_enabled())
1339 continue;
1340
1341 /*
1342 * The goal here is to allocate all possibly required
1343 * hardware page tables pointed to by the top hardware
1344 * level.
1345 *
1346 * On 4-level systems, the P4D layer is folded away and
1347 * the above code does no preallocation. Below, go down
1348 * to the pud _software_ level to ensure the second
1349 * hardware level is allocated on 4-level systems too.
1350 */
1351 lvl = "pud";
1352 pud = pud_alloc(&init_mm, p4d, addr);
1353 if (!pud)
1354 goto failed;
1355 }
1356
1357 return;
1358
1359 failed:
1360
1361 /*
1362 * The pages have to be there now or they will be missing in
1363 * process page-tables later.
1364 */
1365 panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl);
1366 }
1367
1368 void __init mem_init(void)
1369 {
1370 pci_iommu_alloc();
1371
1372 /* clear_bss() already clear the empty_zero_page */
1373
1374 /* this will put all memory onto the freelists */
1375 memblock_free_all();
1376 after_bootmem = 1;
1377 x86_init.hyper.init_after_bootmem();
1378
1379 /*
1380 * Must be done after boot memory is put on freelist, because here we
1381 * might set fields in deferred struct pages that have not yet been
1382 * initialized, and memblock_free_all() initializes all the reserved
1383 * deferred pages for us.
1384 */
1385 register_page_bootmem_info();
1386
1387 /* Register memory areas for /proc/kcore */
1388 if (get_gate_vma(&init_mm))
1389 kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
1390
1391 preallocate_vmalloc_pages();
1392 }
1393
1394 int kernel_set_to_readonly;
1395
1396 void mark_rodata_ro(void)
1397 {
1398 unsigned long start = PFN_ALIGN(_text);
1399 unsigned long rodata_start = PFN_ALIGN(__start_rodata);
1400 unsigned long end = (unsigned long)__end_rodata_hpage_align;
1401 unsigned long text_end = PFN_ALIGN(_etext);
1402 unsigned long rodata_end = PFN_ALIGN(__end_rodata);
1403 unsigned long all_end;
1404
1405 printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
1406 (end - start) >> 10);
1407 set_memory_ro(start, (end - start) >> PAGE_SHIFT);
1408
1409 kernel_set_to_readonly = 1;
1410
1411 /*
1412 * The rodata/data/bss/brk section (but not the kernel text!)
1413 * should also be not-executable.
1414 *
1415 * We align all_end to PMD_SIZE because the existing mapping
1416 * is a full PMD. If we aligned _brk_end to PAGE_SIZE we would
1417 * split the PMD and the remainder between _brk_end and the end
1418 * of the PMD would remain mapped executable.
1419 *
1420 * Any PMD which was set up after the one which covers _brk_end
1421 * has been zapped already via cleanup_highmap().
1422 */
1423 all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
1424 set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
1425
1426 set_ftrace_ops_ro();
1427
1428 #ifdef CONFIG_CPA_DEBUG
1429 printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
1430 set_memory_rw(start, (end-start) >> PAGE_SHIFT);
1431
1432 printk(KERN_INFO "Testing CPA: again\n");
1433 set_memory_ro(start, (end-start) >> PAGE_SHIFT);
1434 #endif
1435
1436 free_kernel_image_pages("unused kernel image (text/rodata gap)",
1437 (void *)text_end, (void *)rodata_start);
1438 free_kernel_image_pages("unused kernel image (rodata/data gap)",
1439 (void *)rodata_end, (void *)_sdata);
1440 }
1441
1442 /*
1443 * Block size is the minimum amount of memory which can be hotplugged or
1444 * hotremoved. It must be a power of two and must be equal to or larger than
1445 * MIN_MEMORY_BLOCK_SIZE.
1446 */
1447 #define MAX_BLOCK_SIZE (2UL << 30)
1448
1449 /* Amount of ram needed to start using large blocks */
1450 #define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)
1451
1452 /* Adjustable memory block size */
1453 static unsigned long set_memory_block_size;
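/*
 * Allow early boot code to override the memory block size; 'order' must
 * describe a power-of-two size between MIN_MEMORY_BLOCK_SIZE and
 * MEM_SIZE_FOR_LARGE_BLOCK.
 */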
1454 int __init set_memory_block_size_order(unsigned int order)
1455 {
1456 unsigned long size = 1UL << order;
1457
1458 if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
1459 return -EINVAL;
1460
1461 set_memory_block_size = size;
1462 return 0;
1463 }
1464
1465 static unsigned long probe_memory_block_size(void)
1466 {
1467 unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
1468 unsigned long bz;
1469
1470 /* If memory block size has been set, then use it */
1471 bz = set_memory_block_size;
1472 if (bz)
1473 goto done;
1474
1475 /* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
1476 if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
1477 bz = MIN_MEMORY_BLOCK_SIZE;
1478 goto done;
1479 }
1480
1481 /*
1482 * Use max block size to minimize overhead on bare metal, where
1483 * alignment for memory hotplug isn't a concern.
1484 */
1485 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1486 bz = MAX_BLOCK_SIZE;
1487 goto done;
1488 }
1489
1490 /* Find the largest allowed block size that aligns to memory end */
1491 for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
1492 if (IS_ALIGNED(boot_mem_end, bz))
1493 break;
1494 }
1495 done:
1496 pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);
1497
1498 return bz;
1499 }
1500
1501 static unsigned long memory_block_size_probed;
1502 unsigned long memory_block_size_bytes(void)
1503 {
1504 if (!memory_block_size_probed)
1505 memory_block_size_probed = probe_memory_block_size();
1506
1507 return memory_block_size_probed;
1508 }
1509
1510 #ifdef CONFIG_SPARSEMEM_VMEMMAP
1511 /*
1512 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
1513 */
1514 static long __meminitdata addr_start, addr_end;
1515 static void __meminitdata *p_start, *p_end;
1516 static int __meminitdata node_start;
1517
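/*
 * Install a PMD-sized vmemmap mapping backed by the page at 'p' and track
 * contiguous ranges for the pr_debug() output; a PMD that is only partially
 * covered is handed to vmemmap_use_new_sub_pmd() for PAGE_UNUSED tracking.
 */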
1518 void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
1519 unsigned long addr, unsigned long next)
1520 {
1521 pte_t entry;
1522
1523 entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
1524 PAGE_KERNEL_LARGE);
1525 set_pmd(pmd, __pmd(pte_val(entry)));
1526
1527 /* check to see if we have contiguous blocks */
1528 if (p_end != p || node_start != node) {
1529 if (p_start)
1530 pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
1531 addr_start, addr_end-1, p_start, p_end-1, node_start);
1532 addr_start = addr;
1533 node_start = node;
1534 p_start = p;
1535 }
1536
1537 addr_end = addr + PMD_SIZE;
1538 p_end = p + PMD_SIZE;
1539
1540 if (!IS_ALIGNED(addr, PMD_SIZE) ||
1541 !IS_ALIGNED(next, PMD_SIZE))
1542 vmemmap_use_new_sub_pmd(addr, next);
1543 }
1544
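/*
 * Returns non-zero if the PMD already maps this vmemmap range with a large
 * page, marking the covered sub-range as used so it is not freed later.
 */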
1545 int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
1546 unsigned long addr, unsigned long next)
1547 {
1548 int large = pmd_leaf(*pmd);
1549
1550 if (pmd_leaf(*pmd)) {
1551 vmemmap_verify((pte_t *)pmd, node, addr, next);
1552 vmemmap_use_sub_pmd(addr, next);
1553 }
1554
1555 return large;
1556 }
1557
1558 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
1559 struct vmem_altmap *altmap)
1560 {
1561 int err;
1562
1563 VM_BUG_ON(!PAGE_ALIGNED(start));
1564 VM_BUG_ON(!PAGE_ALIGNED(end));
1565
1566 if (end - start < PAGES_PER_SECTION * sizeof(struct page))
1567 err = vmemmap_populate_basepages(start, end, node, NULL);
1568 else if (boot_cpu_has(X86_FEATURE_PSE))
1569 err = vmemmap_populate_hugepages(start, end, node, altmap);
1570 else if (altmap) {
1571 pr_err_once("%s: no cpu support for altmap allocations\n",
1572 __func__);
1573 err = -ENOMEM;
1574 } else
1575 err = vmemmap_populate_basepages(start, end, node, NULL);
1576 if (!err)
1577 sync_global_pgds(start, end - 1);
1578 return err;
1579 }
1580
1581 #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
1582 void register_page_bootmem_memmap(unsigned long section_nr,
1583 struct page *start_page, unsigned long nr_pages)
1584 {
1585 unsigned long addr = (unsigned long)start_page;
1586 unsigned long end = (unsigned long)(start_page + nr_pages);
1587 unsigned long next;
1588 pgd_t *pgd;
1589 p4d_t *p4d;
1590 pud_t *pud;
1591 pmd_t *pmd;
1592 unsigned int nr_pmd_pages;
1593 struct page *page;
1594
1595 for (; addr < end; addr = next) {
1596 pte_t *pte = NULL;
1597
1598 pgd = pgd_offset_k(addr);
1599 if (pgd_none(*pgd)) {
1600 next = (addr + PAGE_SIZE) & PAGE_MASK;
1601 continue;
1602 }
1603 get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);
1604
1605 p4d = p4d_offset(pgd, addr);
1606 if (p4d_none(*p4d)) {
1607 next = (addr + PAGE_SIZE) & PAGE_MASK;
1608 continue;
1609 }
1610 get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);
1611
1612 pud = pud_offset(p4d, addr);
1613 if (pud_none(*pud)) {
1614 next = (addr + PAGE_SIZE) & PAGE_MASK;
1615 continue;
1616 }
1617 get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
1618
1619 if (!boot_cpu_has(X86_FEATURE_PSE)) {
1620 next = (addr + PAGE_SIZE) & PAGE_MASK;
1621 pmd = pmd_offset(pud, addr);
1622 if (pmd_none(*pmd))
1623 continue;
1624 get_page_bootmem(section_nr, pmd_page(*pmd),
1625 MIX_SECTION_INFO);
1626
1627 pte = pte_offset_kernel(pmd, addr);
1628 if (pte_none(*pte))
1629 continue;
1630 get_page_bootmem(section_nr, pte_page(*pte),
1631 SECTION_INFO);
1632 } else {
1633 next = pmd_addr_end(addr, end);
1634
1635 pmd = pmd_offset(pud, addr);
1636 if (pmd_none(*pmd))
1637 continue;
1638
1639 nr_pmd_pages = 1 << get_order(PMD_SIZE);
1640 page = pmd_page(*pmd);
1641 while (nr_pmd_pages--)
1642 get_page_bootmem(section_nr, page++,
1643 SECTION_INFO);
1644 }
1645 }
1646 }
1647 #endif
1648
1649 void __meminit vmemmap_populate_print_last(void)
1650 {
1651 if (p_start) {
1652 pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
1653 addr_start, addr_end-1, p_start, p_end-1, node_start);
1654 p_start = NULL;
1655 p_end = NULL;
1656 node_start = 0;
1657 }
1658 }
1659 #endif
1660