// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 */

#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <asm/page-states.h>
#include <asm/abs_lowcore.h>
#include <asm/cacheflush.h>
#include <asm/maccess.h>
#include <asm/nospec-branch.h>
#include <asm/ctlreg.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>
#include <asm/physmem_info.h>

static DEFINE_MUTEX(vmem_mutex);

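/* Allocate 2^order pages: from the buddy allocator once slab is up, from memblock during early boot. */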
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return memblock_alloc(size, size);
}

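/*
 * Free pages allocated by vmem_alloc_pages(). Memory backed by an altmap is
 * returned to the altmap instead; boot (memblock) memory is never freed.
 */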
static void vmem_free_pages(unsigned long addr, int order, struct vmem_altmap *altmap)
{
	if (altmap) {
		vmem_altmap_free(altmap, 1 << order);
		return;
	}
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr))))
		return;
	free_pages(addr, order);
}

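/* Allocate a region or segment (crst) table and initialize all of its entries to @val. */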
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (!table)
		return NULL;
	crst_table_init(table, val);
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}

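/*
 * Allocate a page table with all entries marked invalid. The kernel page
 * table allocator is used once the slab allocator is available, memblock
 * during early boot.
 */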
pte_t __ref *vmem_pte_alloc(void)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *)page_table_alloc(&init_mm);
	else
		pte = (pte_t *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	__arch_set_page_dat(pte, 1);
	return pte;
}

static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}

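/*
 * Marker byte for sub-PMD ranges of the vmemmap that are backed by a large
 * page but not (yet) used; a PMD whose page contains only this marker can
 * be freed again.
 */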
#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 * from unused_sub_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_sub_pmd_start;

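/* memset() the remembered tail of the last partially used vmemmap PMD to PAGE_UNUSED. */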
static void vmemmap_flush_unused_sub_pmd(void)
{
	if (!unused_sub_pmd_start)
		return;
	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
	unused_sub_pmd_start = 0;
}

static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece used to block the memmap page from
	 * getting removed (just in case the memmap never gets initialized,
	 * e.g., because the memory block never gets onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}

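/* Mark the sub-PMD range [start, end) of an already populated vmemmap PMD as used. */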
static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_sub_pmd_start == start) {
		unused_sub_pmd_start = end;
		if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
			unused_sub_pmd_start = 0;
		return;
	}
	vmemmap_flush_unused_sub_pmd();
	vmemmap_mark_sub_pmd_used(start, end);
}

static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();

	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
	vmemmap_mark_sub_pmd_used(start, end);

	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);
	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD the last
	 * unused range in the populated PMD.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_sub_pmd_start = end;
}

/* Returns true if the PMD is completely unused and can be freed. */
static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();
	memset((void *)start, PAGE_UNUSED, end - start);
	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
}

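/*
 * Add or remove PTE mappings for the range [addr, end) within one PMD. For
 * the identity mapping (direct) only the translations are installed or
 * cleared; for the vmemmap, backing pages are allocated and freed as well.
 */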
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
				  unsigned long end, bool add, bool direct,
				  struct vmem_altmap *altmap)
{
	unsigned long prot, pages = 0;
	int ret = -ENOMEM;
	pte_t *pte;

	prot = pgprot_val(PAGE_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_PAGE_NOEXEC;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (!add) {
			if (pte_none(*pte))
				continue;
			if (!direct)
				vmem_free_pages((unsigned long)pfn_to_virt(pte_pfn(*pte)), get_order(PAGE_SIZE), altmap);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
				void *new_page = vmemmap_alloc_block_buf(PAGE_SIZE, NUMA_NO_NODE, altmap);

				if (!new_page)
					goto out;
				set_pte(pte, __pte(__pa(new_page) | prot));
			} else {
				set_pte(pte, __pte(__pa(addr) | prot));
			}
		} else {
			continue;
		}
		pages++;
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
	return ret;
}

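/* Free the PTE table referenced by @pmd if all of its entries are empty. */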
static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(*pte))
			return;
	}
	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
	pmd_clear(pmd);
}

/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct,
				  struct vmem_altmap *altmap)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_leaf(*pmd)) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE), altmap);
					pmd_clear(pmd);
					pages++;
				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE), altmap);
					pmd_clear(pmd);
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pmd(pmd, __pmd(__pa(addr) | prot));
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
				void *new_page;

				/*
				 * Use 1MB frames for vmemmap if available. We
				 * always use large frames even if they are only
				 * partially used. Otherwise we would also need
				 * page tables, since vmemmap_populate() gets
				 * called for each section separately.
				 */
				new_page = vmemmap_alloc_block_buf(PMD_SIZE, NUMA_NO_NODE, altmap);
				if (new_page) {
					set_pmd(pmd, __pmd(__pa(new_page) | prot));
					if (!IS_ALIGNED(addr, PMD_SIZE) ||
					    !IS_ALIGNED(next, PMD_SIZE)) {
						vmemmap_use_new_sub_pmd(addr, next);
					}
					continue;
				}
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_leaf(*pmd)) {
			if (!direct)
				vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		ret = modify_pte_table(pmd, addr, next, add, direct, altmap);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}

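/* Free the PMD (segment) table referenced by @pud if none of its entries are in use. */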
static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	pmd_t *pmd;
	int i;

	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
		if (!pmd_none(*pmd))
			return;
	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER, NULL);
	pud_clear(pud);
}

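/*
 * Same as modify_pmd_table(), one level up: add or remove PUD-level mappings
 * for [addr, end), using 2GB large pages for the identity mapping when the
 * EDAT2 facility is available.
 */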
static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    bool add, bool direct, struct vmem_altmap *altmap)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pud_t *pud;
	pmd_t *pmd;

	prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_REGION_ENTRY_NOEXEC;
	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (!add) {
			if (pud_none(*pud))
				continue;
			if (pud_leaf(*pud)) {
				if (IS_ALIGNED(addr, PUD_SIZE) &&
				    IS_ALIGNED(next, PUD_SIZE)) {
					pud_clear(pud);
					pages++;
				}
				continue;
			}
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pud(pud, __pud(__pa(addr) | prot));
				pages++;
				continue;
			}
			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pmd)
				goto out;
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_leaf(*pud)) {
			continue;
		}
		ret = modify_pmd_table(pud, addr, next, add, direct, altmap);
		if (ret)
			goto out;
		if (!add)
			try_free_pmd_table(pud, addr & PUD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
	return ret;
}

static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, start);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		if (!pud_none(*pud))
			return;
	}
	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER, NULL);
	p4d_clear(p4d);
}

static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
			    bool add, bool direct, struct vmem_altmap *altmap)
{
	unsigned long next;
	int ret = -ENOMEM;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (!add) {
			if (p4d_none(*p4d))
				continue;
		} else if (p4d_none(*p4d)) {
			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pud)
				goto out;
			p4d_populate(&init_mm, p4d, pud);
		}
		ret = modify_pud_table(p4d, addr, next, add, direct, altmap);
		if (ret)
			goto out;
		if (!add)
			try_free_pud_table(p4d, addr & P4D_MASK);
	}
	ret = 0;
out:
	return ret;
}

static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, start);
	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		if (!p4d_none(*p4d))
			return;
	}
	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER, NULL);
	pgd_clear(pgd);
}

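/*
 * Central worker for adding or removing kernel mappings in the range
 * [start, end): @add selects populate vs. unmap, @direct selects the 1:1
 * (identity) mapping vs. the vmemmap. On removal the TLB is flushed for
 * the whole range.
 */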
static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct, struct vmem_altmap *altmap)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (WARN_ON_ONCE(end > __abs_lowcore))
		return -EINVAL;
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}
		ret = modify_p4d_table(pgd, addr, next, add, direct, altmap);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}

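/* Convenience wrappers around modify_pagetable(). */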
static int add_pagetable(unsigned long start, unsigned long end, bool direct,
			 struct vmem_altmap *altmap)
{
	return modify_pagetable(start, end, true, direct, altmap);
}

static int remove_pagetable(unsigned long start, unsigned long end, bool direct,
			    struct vmem_altmap *altmap)
{
	return modify_pagetable(start, end, false, direct, altmap);
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	return add_pagetable(start, start + size, true, NULL);
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	remove_pagetable(start, start + size, true, NULL);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false, altmap);
	if (ret)
		remove_pagetable(start, end, false, altmap);
	mutex_unlock(&vmem_mutex);
	return ret;
}

#ifdef CONFIG_MEMORY_HOTPLUG

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	mutex_lock(&vmem_mutex);
	remove_pagetable(start, end, false, altmap);
	mutex_unlock(&vmem_mutex);
}

#endif

void vmem_remove_mapping(unsigned long start, unsigned long size)
{
	mutex_lock(&vmem_mutex);
	vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
}

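/*
 * Memory can only be hotplugged below max_mappable, i.e. within the range
 * that the identity mapping set up during boot is prepared to cover.
 */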
struct range arch_get_mappable_range(void)
{
	struct range mhp_range;

	mhp_range.start = 0;
	mhp_range.end = max_mappable - 1;
	return mhp_range;
}

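/*
 * Add a physical memory range to the 1:1 mapping after checking it against
 * the mappable range; partially created mappings are removed on failure.
 */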
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct range range = arch_get_mappable_range();
	int ret;

	if (start < range.start ||
	    start + size > range.end + 1 ||
	    start + size < start)
		return -ERANGE;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Allocate a new or return an existing page-table entry, but do not map it
 * to any physical address. If missing, allocate the required segment- and
 * region-table entries along the way. Meeting a large segment- or region-table
 * entry while traversing is an error, since the function is expected to be
 * called against virtual regions reserved for 4KB mappings only.
 */
pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{
	pte_t *ptep = NULL;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		if (!alloc)
			goto out;
		p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!p4d)
			goto out;
		pgd_populate(&init_mm, pgd, p4d);
	}
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		if (!alloc)
			goto out;
		pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!pud)
			goto out;
		p4d_populate(&init_mm, p4d, pud);
	}
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		if (!alloc)
			goto out;
		pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!pmd)
			goto out;
		pud_populate(&init_mm, pud, pmd);
	} else if (WARN_ON_ONCE(pud_leaf(*pud))) {
		goto out;
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		if (!alloc)
			goto out;
		pte = vmem_pte_alloc();
		if (!pte)
			goto out;
		pmd_populate(&init_mm, pmd, pte);
	} else if (WARN_ON_ONCE(pmd_leaf(*pmd))) {
		goto out;
	}
	ptep = pte_offset_kernel(pmd, addr);
out:
	return ptep;
}

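/*
 * Map the single 4KB page at @addr to physical address @phys with protection
 * @prot, flushing any previous translation for @addr. If @alloc is false,
 * fail instead of allocating missing page-table levels.
 */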
int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc)
{
	pte_t *ptep, pte;

	if (!IS_ALIGNED(addr, PAGE_SIZE))
		return -EINVAL;
	ptep = vmem_get_alloc_pte(addr, alloc);
	if (!ptep)
		return -ENOMEM;
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte = mk_pte_phys(phys, prot);
	set_pte(ptep, pte);
	return 0;
}

int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	int rc;

	mutex_lock(&vmem_mutex);
	rc = __vmem_map_4k_page(addr, phys, prot, true);
	mutex_unlock(&vmem_mutex);
	return rc;
}

void vmem_unmap_4k_page(unsigned long addr)
{
	pte_t *ptep;

	mutex_lock(&vmem_mutex);
	ptep = virt_to_kpte(addr);
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte_clear(&init_mm, addr, ptep);
	mutex_unlock(&vmem_mutex);
}

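/*
 * Apply the final protections to the kernel mapping: kernel and amode31 text
 * become read-only and executable, read-only data becomes read-only. With
 * debug_pagealloc the identity mapping is split into 4KB pages.
 */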
void __init vmem_map_init(void)
{
	__set_memory_rox(_stext, _etext);
	__set_memory_ro(_etext, __end_rodata);
	__set_memory_rox(__stext_amode31, __etext_amode31);
	/*
	 * If the BEAR-enhancement facility is not installed, the first
	 * prefix page is used to return to the previous context with
	 * an LPSWE instruction and therefore must be executable.
	 */
	if (!static_key_enabled(&cpu_has_bear))
		set_memory_x(0, 1);
	if (debug_pagealloc_enabled())
		__set_memory_4k(__va(0), __va(0) + ident_map_size);
	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}