// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/facility.h>
#include <asm/page-states.h>

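/*
 * Page hinting mode: 0 = disabled, 1 = ESSA page states,
 * 2 = ESSA page states including the no-dat states (facility 147).
 * Enabled by default; may be overridden with the "cmma=" parameter.
 */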
static int cmma_flag = 1;

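/* Parse the "cmma=" kernel parameter as a boolean on/off switch. */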
static int __init cmma(char *str)
{
	bool enabled;

	if (!kstrtobool(str, &enabled))
		cmma_flag = enabled;
	return 1;
}
__setup("cmma=", cmma);

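/*
 * Probe for the ESSA instruction by executing ESSA_GET_STATE.
 * If ESSA is not available the instruction raises an exception and
 * the exception table fixup skips the "la" that clears rc, so the
 * initial -EOPNOTSUPP is returned.
 */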
static inline int cmma_test_essa(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1");

	/* test ESSA_GET_STATE */
	asm volatile(
		"	.insn	rrf,0xb9ab0000,%1,%1,%2,0\n"
		"0:     la      %0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=&d" (rc), "+&d" (tmp)
		: "i" (ESSA_GET_STATE), "0" (-EOPNOTSUPP));
	return rc;
}

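/*
 * Disable page hinting if ESSA is not available. If facility 147 is
 * installed the additional no-dat page states can be used as well.
 */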
void __init cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

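/* Extract the current guest page state of @page with ESSA_GET_STATE. */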
static inline unsigned char get_page_state(struct page *page)
{
	unsigned char state;

	asm volatile("	.insn	rrf,0xb9ab0000,%0,%1,%2,0"
		     : "=&d" (state)
		     : "a" (page_to_phys(page)),
		       "i" (ESSA_GET_STATE));
	return state & 0x3f;
}

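/* Set all 2^order pages of the block to the unused state. */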
static inline void set_page_unused(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

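/* Set all 2^order pages of the block to the stable state. */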
static inline void set_page_stable_dat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

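/* Set all 2^order pages of the block to the stable/no-dat state. */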
static inline void set_page_stable_nodat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE_NODAT));
}

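/*
 * Tag the pages containing pte tables referenced by the kernel pmd
 * entries with PG_arch_1 so that cmma_init_nodat() leaves them in
 * the dat state.
 */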
static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;
		page = virt_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
}

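/*
 * Tag the pmd tables referenced by the kernel pud entries. A crst
 * table (region or segment table) is 16KB, i.e. four 4KB pages.
 */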
static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || pud_large(*pud))
			continue;
		if (!pud_folded(*pud)) {
			page = virt_to_page(pud_val(*pud));
			/* a crst table spans four pages, mark all of them */
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

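/* Tag the pud tables referenced by the kernel p4d entries. */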
static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d))
			continue;
		if (!p4d_folded(*p4d)) {
			page = virt_to_page(p4d_val(*p4d));
			/* a crst table spans four pages, mark all of them */
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

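/*
 * Walk the kernel page tables from address 0 up to MODULES_END and
 * tag every page that is used for address translation.
 */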
static void mark_kernel_pgd(void)
{
	unsigned long addr, next;
	struct page *page;
	pgd_t *pgd;
	int i;

	addr = 0;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, MODULES_END);
		if (pgd_none(*pgd))
			continue;
		if (!pgd_folded(*pgd)) {
			page = virt_to_page(pgd_val(*pgd));
			/* a crst table spans four pages, mark all of them */
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
	} while (pgd++, addr = next, addr != MODULES_END);
}

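/*
 * Set all kernel pages that are neither page table pages (tagged with
 * PG_arch_1 above) nor on a free list to the stable/no-dat state.
 */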
void __init cmma_init_nodat(void)
{
	struct page *page;
	unsigned long start, end, ix;
	int i;

	if (cmma_flag < 2)
		return;
	/* Mark pages used in kernel page tables */
	mark_kernel_pgd();

	/* Set all kernel pages not used for page tables to stable/no-dat */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* skip page table pages */
			if (!list_empty(&page->lru))
				continue;	/* skip free pages */
			set_page_stable_nodat(page, 0);
		}
	}
}

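/* Tell the hypervisor that a freed page block is no longer in use. */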
void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unused(page, order);
}

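/* Make a freshly allocated page block stable again. */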
void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	if (cmma_flag < 2)
		set_page_stable_dat(page, order);
	else
		set_page_stable_nodat(page, order);
}

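/*
 * Set a page block to the stable state with dat, for pages that will
 * take part in address translation.
 */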
void arch_set_page_dat(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable_dat(page, order);
}

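/* Set a page block to the stable/no-dat state. */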
void arch_set_page_nodat(struct page *page, int order)
{
	if (cmma_flag < 2)
		return;
	set_page_stable_nodat(page, order);
}

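/* Return 1 if the page is in a no-dat state, 0 otherwise. */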
int arch_test_page_nodat(struct page *page)
{
	unsigned char state;

	if (cmma_flag < 2)
		return 0;
	state = get_page_state(page);
	return !!(state & 0x20);
}

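/*
 * Walk the free lists of all populated zones and set every free page
 * block to the stable state (make_stable != 0) or back to the unused
 * state (make_stable == 0).
 */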
void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable_dat(page, order);
				else
					set_page_unused(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}