// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#undef DEBUG

#include <linux/string.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/kup.h>

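/*
 * Physical address of the start of memory, and the physical and virtual
 * addresses at which the kernel itself starts.  kernstart_virt_addr
 * defaults to KERNELBASE but may differ when the kernel is relocated.
 */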
phys_addr_t memstart_addr __ro_after_init = (phys_addr_t)~0ull;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr __ro_after_init;
EXPORT_SYMBOL_GPL(kernstart_addr);
unsigned long kernstart_virt_addr __ro_after_init = KERNELBASE;
EXPORT_SYMBOL_GPL(kernstart_virt_addr);

static bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP);
static bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);

static int __init parse_nosmep(char *p)
{
	disable_kuep = true;
	pr_warn("Disabling Kernel Userspace Execution Prevention\n");
	return 0;
}
early_param("nosmep", parse_nosmep);

static int __init parse_nosmap(char *p)
{
	disable_kuap = true;
	pr_warn("Disabling Kernel Userspace Access Protection\n");
	return 0;
}
early_param("nosmap", parse_nosmap);

void __ref setup_kup(void)
{
	setup_kuep(disable_kuep);
	setup_kuap(disable_kuap);
}

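/*
 * kmem_cache constructors only receive the object address, so the table
 * size cannot be passed in as an argument.  Generate one constructor per
 * supported index size; ctor() picks the right one for a given shift.
 */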
#define CTOR(shift) static void ctor_##shift(void *addr) \
{							\
	memset(addr, 0, sizeof(void *) << (shift));	\
}

CTOR(0); CTOR(1); CTOR(2); CTOR(3); CTOR(4); CTOR(5); CTOR(6); CTOR(7);
CTOR(8); CTOR(9); CTOR(10); CTOR(11); CTOR(12); CTOR(13); CTOR(14); CTOR(15);

static inline void (*ctor(int shift))(void *)
{
	BUILD_BUG_ON(MAX_PGTABLE_INDEX_SIZE != 15);

	switch (shift) {
	case 0: return ctor_0;
	case 1: return ctor_1;
	case 2: return ctor_2;
	case 3: return ctor_3;
	case 4: return ctor_4;
	case 5: return ctor_5;
	case 6: return ctor_6;
	case 7: return ctor_7;
	case 8: return ctor_8;
	case 9: return ctor_9;
	case 10: return ctor_10;
	case 11: return ctor_11;
	case 12: return ctor_12;
	case 13: return ctor_13;
	case 14: return ctor_14;
	case 15: return ctor_15;
	}
	return NULL;
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE + 1];
EXPORT_SYMBOL_GPL(pgtable_cache);	/* used by kvm_hv module */

/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned int shift)
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;

	/* When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits.  Table alignment must be
	 * big enough to fit it.
	 *
	 * Likewise, hugepage pagetable pointers contain a (different)
	 * shift value in the low bits.  All tables must be aligned so
	 * as to leave enough 0 bits in the address to contain it. */
	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
				     HUGEPD_SHIFT_MASK + 1);
	struct kmem_cache *new;

	/* It would be nice if this was a BUILD_BUG_ON(), but at the
	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
	 * constant expression, so, so much for that. */
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);

	if (PGT_CACHE(shift))
		return; /* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	new = kmem_cache_create(name, table_size, align, 0, ctor(shift));
	if (!new)
		panic("Could not allocate pgtable cache for order %d", shift);

	kfree(name);
	pgtable_cache[shift] = new;

	pr_debug("Allocated pgtable cache for order %d\n", shift);
}
EXPORT_SYMBOL_GPL(pgtable_cache_add);	/* used by kvm_hv module */

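/*
 * Set up the page table caches needed by the current configuration:
 * the PGD cache always, plus the PMD and PUD caches when those levels
 * have their own index sizes.
 */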
void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE);

	if (PMD_CACHE_INDEX)
		pgtable_cache_add(PMD_CACHE_INDEX);
	/*
	 * In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index except with THP enabled
	 * on book3s 64
	 */
	if (PUD_CACHE_INDEX)
		pgtable_cache_add(PUD_CACHE_INDEX);
}