// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/debugfs.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"
#include "mm_internal.h"
#include "../../mm/internal.h"	/* is_cow_mapping() */

#undef pr_fmt
#define pr_fmt(fmt) "" fmt

static bool __read_mostly boot_cpu_done;
static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
static bool __read_mostly pat_initialized;
static bool __read_mostly init_cm_done;

void pat_disable(const char *reason)
{
	if (pat_disabled)
		return;

	if (boot_cpu_done) {
		WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
		return;
	}

	pat_disabled = true;
	pr_info("x86/PAT: %s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);

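/* True once the boot CPU has successfully programmed the PAT MSR. */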
bool pat_enabled(void)
{
	return pat_initialized;
}
EXPORT_SYMBOL_GPL(pat_enabled);

int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 1;
}
__setup("debugpat", pat_debug_setup);

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses the page flags arch_1 and uncached together to keep track of
 * the memory type of pages that have a backing struct page.
 *
 * X86 PAT supports 4 different memory types:
 *  - _PAGE_CACHE_MODE_WB
 *  - _PAGE_CACHE_MODE_WC
 *  - _PAGE_CACHE_MODE_UC_MINUS
 *  - _PAGE_CACHE_MODE_WT
 *
 * _PAGE_CACHE_MODE_WB is the default type.
 */

#define _PGMT_WB		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WT		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)

static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_WB)
		return _PAGE_CACHE_MODE_WB;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_MODE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_MODE_UC_MINUS;
	else
		return _PAGE_CACHE_MODE_WT;
}

static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
	unsigned long memtype_flags;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_MODE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_MODE_WT:
		memtype_flags = _PGMT_WT;
		break;
	case _PAGE_CACHE_MODE_WB:
	default:
		memtype_flags = _PGMT_WB;
		break;
	}

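	/*
	 * Update only the two memtype flag bits; the lockless cmpxchg loop
	 * makes sure concurrent updates to other page flags are not lost.
	 */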
	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	return -1;
}
static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
}
#endif

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define CM(c) (_PAGE_CACHE_MODE_ ## c)

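/*
 * Decode one 3-bit PAT entry into the kernel's page_cache_mode and copy a
 * four-character name into @msg for the boot-time configuration printout.
 */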
static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
{
	enum page_cache_mode cache;
	char *cache_mode;

	switch (pat_val) {
	case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
	case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
	case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
	case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
	case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
	case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
	default:           cache = CM(WB);       cache_mode = "WB  "; break;
	}

	memcpy(msg, cache_mode, 4);

	return cache;
}

#undef CM

/*
 * Update the cache mode to pgprot translation tables according to the PAT
 * configuration.
 * Using lower indices is preferred, so we start with the highest index.
 */
static void __init_cache_modes(u64 pat)
{
	enum page_cache_mode cache;
	char pat_msg[33];
	int i;

	pat_msg[32] = 0;
	for (i = 7; i >= 0; i--) {
		cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
					   pat_msg + 4 * i);
		update_cache_mode_entry(i, cache);
	}
	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);

	init_cm_done = true;
}

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))

static void pat_bsp_init(u64 pat)
{
	u64 tmp_pat;

	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		pat_disable("PAT not supported by CPU.");
		return;
	}

	rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
	if (!tmp_pat) {
		pat_disable("PAT MSR is 0, disabled.");
		return;
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);
	pat_initialized = true;

	__init_cache_modes(pat);
}

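/* Program the PAT MSR on a secondary CPU with the value chosen at boot. */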
static void pat_ap_init(u64 pat)
{
	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		/*
		 * If this happens we are on a secondary CPU, but switched to
		 * PAT on the boot CPU. We have no way to undo PAT.
		 */
		panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);
}

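/*
 * Initialize the cachemode translation tables from the current (or emulated)
 * PAT MSR value without writing the MSR, e.g. when running with "nopat" or
 * on CPUs without PAT support.
 */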
void init_cache_modes(void)
{
	u64 pat = 0;

	if (init_cm_done)
		return;

	if (boot_cpu_has(X86_FEATURE_PAT)) {
		/*
		 * CPU supports PAT. Set the PAT table to be consistent with
		 * the PAT MSR. This case supports the "nopat" boot option,
		 * and virtual machine environments which support PAT without
		 * MTRRs. In particular, Xen has a unique setup of the PAT MSR.
		 *
		 * If the PAT MSR returns 0, it is considered invalid and is
		 * emulated as no-PAT.
		 */
		rdmsrl(MSR_IA32_CR_PAT, pat);
	}

	if (!pat) {
		/*
		 * No PAT. Emulate the PAT table that corresponds to the two
		 * cache bits, PWT (Write Through) and PCD (Cache Disable).
		 * This setup is also the same as the BIOS default setup.
		 *
		 * PTE encoding:
		 *
		 *       PCD
		 *       |PWT  PAT
		 *       ||    slot
		 *       00    0    WB : _PAGE_CACHE_MODE_WB
		 *       01    1    WT : _PAGE_CACHE_MODE_WT
		 *       10    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *       11    3    UC : _PAGE_CACHE_MODE_UC
		 *
		 * NOTE: When WC or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
	}

	__init_cache_modes(pat);
}

/**
 * pat_init - Initialize PAT MSR and PAT table
 *
 * This function initializes the PAT MSR and PAT table with an OS-defined
 * value to enable additional cache attributes, WC, WT and WP.
 *
 * This function must be called on all CPUs using the specific sequence of
 * operations defined in the Intel SDM. mtrr_rendezvous_handler() provides
 * this procedure for PAT.
 */
void pat_init(void)
{
	u64 pat;
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (pat_disabled)
		return;

	if ((c->x86_vendor == X86_VENDOR_INTEL) &&
	    (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
	     ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
		/*
		 * PAT support with the lower four entries. Intel Pentium 2,
		 * 3, M, and 4 are affected by PAT errata, which makes the
		 * upper four entries unusable. To be on the safe side, we don't
		 * use those.
		 *
		 *  PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 * PAT bit unused
		 *
		 * NOTE: When WT or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
	} else {
		/*
		 * Full PAT support.  We put WT in slot 7 to improve
		 * robustness in the presence of errata that might cause
		 * the high PAT bit to be ignored.  This way, a buggy slot 7
		 * access will hit slot 3, and slot 3 is UC, so at worst
		 * we lose performance without causing a correctness issue.
		 * Pentium 4 erratum N46 is an example of such an erratum,
		 * although we try not to use PAT at all on affected CPUs.
		 *
		 *  PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 *      100    4    WB : Reserved
		 *      101    5    WP : _PAGE_CACHE_MODE_WP
		 *      110    6    UC-: Reserved
		 *      111    7    WT : _PAGE_CACHE_MODE_WT
		 *
		 * The reserved slots are unused, but mapped to their
		 * corresponding types in the presence of PAT errata.
		 */
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WP) | PAT(6, UC_MINUS) | PAT(7, WT);
	}

	if (!boot_cpu_done) {
		pat_bsp_init(pat);
		boot_cpu_done = true;
	} else {
		pat_ap_init(pat);
	}
}

#undef PAT

static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (The type encodings used by PAT and MTRR are not the same.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end,
				     enum page_cache_mode req_type)
{
	/*
	 * Look for the MTRR hint to get the effective type in the case where
	 * the PAT request is for WB.
	 */
	if (req_type == _PAGE_CACHE_MODE_WB) {
		u8 mtrr_type, uniform;

		mtrr_type = mtrr_type_lookup(start, end, &uniform);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_MODE_UC_MINUS;

		return _PAGE_CACHE_MODE_WB;
	}

	return req_type;
}

struct pagerange_state {
	unsigned long		cur_pfn;
	int			ram;
	int			not_ram;
};

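/*
 * Callback for walk_system_ram_range(): note whether the walk has seen RAM,
 * a hole before the current RAM chunk, or both. Returning non-zero stops
 * the walk once a mix of RAM and non-RAM has been found.
 */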
static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram	|= initial_pfn > state->cur_pfn;
	state->ram	|= total_nr_pages > 0;
	state->cur_pfn	 = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;
}

static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/*
	 * For legacy reasons, the physical address range in the legacy ISA
	 * region is tracked as non-RAM. This allows users of /dev/mem to map
	 * portions of the legacy ISA region, even when some of those portions
	 * are listed (or not even listed) with different e820 types
	 * (RAM/reserved/...).
	 */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				&state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}

/*
 * For RAM pages, we use page flags to mark the pages with the appropriate
 * type. The page flags are limited to four types, WB (default), WC, WT and
 * UC-. A WP request fails with -EINVAL, and UC gets redirected to UC-.
 * Setting a new memory type is only allowed for a page mapped with the
 * default WB type.
 *
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts.
 * - In case of no conflicts, set the new memtype for pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end,
				  enum page_cache_mode req_type,
				  enum page_cache_mode *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_MODE_WP) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_UC_MINUS;
		return -EINVAL;
	}

	if (req_type == _PAGE_CACHE_MODE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_MODE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		enum page_cache_mode type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != _PAGE_CACHE_MODE_WB) {
			pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
				start, end - 1, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

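/* Return all pages in the range to the default WB type. */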
static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, _PAGE_CACHE_MODE_WB);
	}
	return 0;
}

static u64 sanitize_phys(u64 address)
{
	/*
	 * When changing the memtype for pages containing poison, allow
	 * for a "decoy" virtual address (bit 63 clear) passed to
	 * set_memory_X(). __pa() on a "decoy" address results in a
	 * physical address with bit 63 set.
	 *
	 * Decoy addresses are not present for 32-bit builds, see
	 * set_mce_nospec().
	 */
	if (IS_ENABLED(CONFIG_X86_64))
		return address & __PHYSICAL_MASK;
	return address;
}

/*
 * req_type is typically one of:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_UC
 * - _PAGE_CACHE_MODE_WT
 *
 * If new_type is NULL, the function returns an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, the function returns
 * the available type in *new_type on success, and a negative value on any
 * error.
 */
int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
		    enum page_cache_mode *new_type)
{
	struct memtype *new;
	enum page_cache_mode actual_type;
	int is_range_ram;
	int err = 0;

	start = sanitize_phys(start);
	end = sanitize_phys(end);
	if (start >= end) {
		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
				start, end - 1, cattr_name(req_type));
		return -EINVAL;
	}

	if (!pat_enabled()) {
		/* This is identical to page table setting without PAT */
		if (new_type)
			*new_type = req_type;
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = reserve_ram_pages_type(start, end, req_type, new_type);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new  = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start	= start;
	new->end	= end;
	new->type	= actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
			start, end - 1,
			cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

int free_memtype(u64 start, u64 end)
{
	int err = -EINVAL;
	int is_range_ram;
	struct memtype *entry;

	if (!pat_enabled())
		return 0;

	start = sanitize_phys(start);
	end = sanitize_phys(end);

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = free_ram_pages_type(start, end);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);
	entry = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (IS_ERR(entry)) {
		pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry);

	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

	return 0;
}


/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
 * or _PAGE_CACHE_MODE_WT.
 */
static enum page_cache_mode lookup_memtype(u64 paddr)
{
	enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;

		page = pfn_to_page(paddr >> PAGE_SHIFT);
		return get_page_memtype(page);
	}

	spin_lock(&memtype_lock);

	entry = rbt_memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_MODE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}

/**
 * pat_pfn_immune_to_uc_mtrr - Check whether the PAT memory type
 * of @pfn cannot be overridden by a UC MTRR memory type.
 *
 * Only to be called when PAT is enabled.
 *
 * Returns true if the PAT memory type of @pfn is UC, UC-, or WC.
 * Returns false in other cases.
 */
bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn)
{
	enum page_cache_mode cm = lookup_memtype(PFN_PHYS(pfn));

	return cm == _PAGE_CACHE_MODE_UC ||
	       cm == _PAGE_CACHE_MODE_UC_MINUS ||
	       cm == _PAGE_CACHE_MODE_WC;
}
EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr);

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with the requested type. On success it is
 * updated to the requested type or to any other compatible type that was
 * available for the region.
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			enum page_cache_mode *type)
{
	resource_size_t size = end - start;
	enum page_cache_mode req_type = *type;
	enum page_cache_mode new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}

int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
{
	enum page_cache_mode type = _PAGE_CACHE_MODE_WC;

	return io_reserve_memtype(start, start + size, &type);
}
EXPORT_SYMBOL(arch_io_reserve_memtype_wc);

void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
{
	io_free_memtype(start, start + size);
}
EXPORT_SYMBOL(arch_io_free_memtype_wc);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size))
		vma_prot = pgprot_decrypted(vma_prot);

	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled())
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		pcm = _PAGE_CACHE_MODE_UC_MINUS;

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     cachemode2protval(pcm));
	return 1;
}

/*
 * Change the memory type for the physical address range in the kernel
 * identity mapping space if that range is a part of the identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size,
			    enum page_cache_mode pcm)
{
	unsigned long id_sz;

	if (base > __pa(high_memory-1))
		return 0;

	/*
	 * Some areas in the middle of the kernel identity range
	 * are not mapped, like the PCI space.
	 */
	if (!page_is_ram(base >> PAGE_SHIFT))
		return 0;

	id_sz = (__pa(high_memory-1) <= base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
		pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid,
			cattr_name(pcm),
			base, (unsigned long long)(base + size-1));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype(), it
 * also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
	enum page_cache_mode pcm = want_pcm;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of the number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of the first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled())
			return 0;

		pcm = lookup_memtype(paddr);
		if (want_pcm != pcm) {
			pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_pcm),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(pcm));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					     (~_PAGE_CACHE_MASK)) |
					     cachemode2protval(pcm));
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
	if (ret)
		return ret;

	if (pcm != want_pcm) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
			free_memtype(paddr, paddr + size);
			pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
			       current->comm, current->pid,
			       cattr_name(want_pcm),
			       (unsigned long long)paddr,
			       (unsigned long long)(paddr + size - 1),
			       cattr_name(pcm));
			return -EINVAL;
		}
		/*
		 * We allow returning a different type than the one requested
		 * in the non-strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     cachemode2protval(pcm));
	}

	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
		pgprot_t *pgprot)
{
	unsigned long prot;

	VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT));

	/*
	 * We need the starting PFN and cachemode used for track_pfn_remap()
	 * that covered the whole VMA. For most mappings, we can obtain that
	 * information from the page tables. For COW mappings, we might now
	 * suddenly have anon folios mapped and follow_phys() will fail.
	 *
	 * Fallback to using vma->vm_pgoff, see remap_pfn_range_notrack(), to
	 * detect the PFN. If we need the cachemode as well, we're out of luck
	 * for now and have to fail fork().
	 */
	if (!follow_phys(vma, vma->vm_start, 0, &prot, paddr)) {
		if (pgprot)
			*pgprot = __pgprot(prot);
		return 0;
	}
	if (is_cow_mapping(vma->vm_flags)) {
		if (pgprot)
			return -EINVAL;
		*paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return 0;
	}
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/*
 * track_pfn_copy is called when a vma covering a pfnmap gets copied through
 * copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range() call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (vma->vm_flags & VM_PAT) {
		if (get_pat_info(vma, &paddr, &pgprot))
			return -EINVAL;
		/* reserve the whole chunk covered by the vma */
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * prot is passed in as a parameter for the new mapping. If the vma has
 * a linear pfn mapping for the entire range, or no vma is provided,
 * reserve the entire pfn + size range with a single reserve_pfn_range()
 * call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
{
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	enum page_cache_mode pcm;

	/* reserve the whole chunk starting from paddr */
	if (!vma || (addr == vma->vm_start
				&& size == (vma->vm_end - vma->vm_start))) {
		int ret;

		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (ret == 0 && vma)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	if (!pat_enabled())
		return 0;

	/*
	 * For anything smaller than the vma size we set prot based on the
	 * lookup.
	 */
	pcm = lookup_memtype(paddr);

	/* Check memtype for the remaining pages */
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (pcm != lookup_memtype(paddr))
			return -EINVAL;
	}

	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}

void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
{
	enum page_cache_mode pcm;

	if (!pat_enabled())
		return;

	/* Set prot based on lookup */
	pcm = lookup_memtype(pfn_t_to_phys(pfn));
	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn and size are zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
{
	resource_size_t paddr;

	if (vma && !(vma->vm_flags & VM_PAT))
		return;

	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (get_pat_info(vma, &paddr, NULL))
			return;
		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
	if (vma)
		vma->vm_flags &= ~VM_PAT;
}

/*
 * untrack_pfn_moved is called while mremapping a pfnmap to a new region,
 * with the old vma after its pfnmap page table has been removed.  The new
 * vma has a new pfnmap to the same pfn & cache type with VM_PAT set.
 */
void untrack_pfn_moved(struct vm_area_struct *vma)
{
	vma->vm_flags &= ~VM_PAT;
}

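/* pgprot helpers used by drivers to request WC and WT mappings. */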
pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

pgprot_t pgprot_writethrough(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
}
EXPORT_SYMBOL_GPL(pgprot_writethrough);

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

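/*
 * Copy the pos'th memtype entry while holding memtype_lock, so the seq_file
 * iterator never keeps a pointer into the rbtree outside the lock.
 */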
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry  = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	if (!ret) {
		return print_entry;
	} else {
		kfree(print_entry);
		return NULL;
	}
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_puts(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	kfree(v);
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
	kfree(v);
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);

	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	if (pat_enabled()) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */