1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Re-map IO memory to kernel address space so that we can access it.
4  * This is needed for high PCI addresses that aren't mapped in the
5  * 640k-1MB IO memory area on PCs.
6  *
7  * (C) Copyright 1995 1996 Linus Torvalds
8  */
9 
10 #include <linux/memblock.h>
11 #include <linux/init.h>
12 #include <linux/io.h>
13 #include <linux/ioport.h>
14 #include <linux/slab.h>
15 #include <linux/vmalloc.h>
16 #include <linux/mmiotrace.h>
17 #include <linux/mem_encrypt.h>
18 #include <linux/efi.h>
19 #include <linux/pgtable.h>
20 
21 #include <asm/set_memory.h>
22 #include <asm/e820/api.h>
23 #include <asm/efi.h>
24 #include <asm/fixmap.h>
25 #include <asm/tlbflush.h>
26 #include <asm/pgalloc.h>
27 #include <asm/memtype.h>
28 #include <asm/setup.h>
29 
30 #include "physaddr.h"
31 
32 /*
33  * Descriptor controlling ioremap() behavior.
34  */
35 struct ioremap_desc {
36 	unsigned int flags;
37 };
38 
39 /*
40  * Fix up the linear direct mapping of the kernel to avoid cache attribute
41  * conflicts.
42  */
43 int ioremap_change_attr(unsigned long vaddr, unsigned long size,
44 			enum page_cache_mode pcm)
45 {
46 	unsigned long nrpages = size >> PAGE_SHIFT;
47 	int err;
48 
49 	switch (pcm) {
50 	case _PAGE_CACHE_MODE_UC:
51 	default:
52 		err = _set_memory_uc(vaddr, nrpages);
53 		break;
54 	case _PAGE_CACHE_MODE_WC:
55 		err = _set_memory_wc(vaddr, nrpages);
56 		break;
57 	case _PAGE_CACHE_MODE_WT:
58 		err = _set_memory_wt(vaddr, nrpages);
59 		break;
60 	case _PAGE_CACHE_MODE_WB:
61 		err = _set_memory_wb(vaddr, nrpages);
62 		break;
63 	}
64 
65 	return err;
66 }
67 
68 /* Does the range (or a subset of it) contain normal RAM? */
69 static unsigned int __ioremap_check_ram(struct resource *res)
70 {
71 	unsigned long start_pfn, stop_pfn;
72 	unsigned long i;
73 
74 	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
75 		return 0;
76 
77 	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
78 	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
79 	if (stop_pfn > start_pfn) {
80 		for (i = 0; i < (stop_pfn - start_pfn); ++i)
81 			if (pfn_valid(start_pfn + i) &&
82 			    !PageReserved(pfn_to_page(start_pfn + i)))
83 				return IORES_MAP_SYSTEM_RAM;
84 	}
85 
86 	return 0;
87 }
88 
89 /*
90  * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
91  * there the whole memory is already encrypted.
92  */
93 static unsigned int __ioremap_check_encrypted(struct resource *res)
94 {
95 	if (!sev_active())
96 		return 0;
97 
98 	switch (res->desc) {
99 	case IORES_DESC_NONE:
100 	case IORES_DESC_RESERVED:
101 		break;
102 	default:
103 		return IORES_MAP_ENCRYPTED;
104 	}
105 
106 	return 0;
107 }
108 
109 /*
110  * The EFI runtime services data area is not covered by walk_mem_res(), but must
111  * be mapped encrypted when SEV is active.
112  */
113 static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
114 {
115 	if (!sev_active())
116 		return;
117 
118 	if (!IS_ENABLED(CONFIG_EFI))
119 		return;
120 
121 	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
122 	    (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
123 	     efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
124 		desc->flags |= IORES_MAP_ENCRYPTED;
125 }
126 
127 static int __ioremap_collect_map_flags(struct resource *res, void *arg)
128 {
129 	struct ioremap_desc *desc = arg;
130 
131 	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
132 		desc->flags |= __ioremap_check_ram(res);
133 
134 	if (!(desc->flags & IORES_MAP_ENCRYPTED))
135 		desc->flags |= __ioremap_check_encrypted(res);
136 
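	/*
	 * Returning true here stops walk_mem_res() early once both map
	 * flags have already been collected.
	 */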
137 	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
138 			       (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
139 }
140 
141 /*
142  * To avoid multiple resource walks, this function walks resources marked as
143  * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
144  * resource not described as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
145  *
146  * After that, deal with misc other ranges in __ioremap_check_other() which do
147  * not fall into the above category.
148  */
149 static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
150 				struct ioremap_desc *desc)
151 {
152 	u64 start, end;
153 
154 	start = (u64)addr;
155 	end = start + size - 1;
156 	memset(desc, 0, sizeof(struct ioremap_desc));
157 
158 	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);
159 
160 	__ioremap_check_other(addr, desc);
161 }
162 
163 /*
164  * Remap an arbitrary physical address space into the kernel virtual
165  * address space. It transparently creates kernel huge I/O mapping when
166  * the physical address is aligned by a huge page size (1GB or 2MB) and
167  * the requested size is at least the huge page size.
168  *
169  * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
170  * Therefore, the mapping code falls back to use a smaller page toward 4KB
171  * when a mapping range is covered by non-WB type of MTRRs.
172  *
173  * NOTE! We need to allow non-page-aligned mappings too: we will obviously
174  * have to convert them into an offset in a page-aligned mapping, but the
175  * caller shouldn't need to know that small detail.
176  */
177 static void __iomem *
178 __ioremap_caller(resource_size_t phys_addr, unsigned long size,
179 		 enum page_cache_mode pcm, void *caller, bool encrypted)
180 {
181 	unsigned long offset, vaddr;
182 	resource_size_t last_addr;
183 	const resource_size_t unaligned_phys_addr = phys_addr;
184 	const unsigned long unaligned_size = size;
185 	struct ioremap_desc io_desc;
186 	struct vm_struct *area;
187 	enum page_cache_mode new_pcm;
188 	pgprot_t prot;
189 	int retval;
190 	void __iomem *ret_addr;
191 
192 	/* Don't allow wraparound or zero size */
193 	last_addr = phys_addr + size - 1;
194 	if (!size || last_addr < phys_addr)
195 		return NULL;
196 
197 	if (!phys_addr_valid(phys_addr)) {
198 		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
199 		       (unsigned long long)phys_addr);
200 		WARN_ON_ONCE(1);
201 		return NULL;
202 	}
203 
204 	__ioremap_check_mem(phys_addr, size, &io_desc);
205 
206 	/*
207 	 * Don't allow anybody to remap normal RAM that we're using..
208 	 */
209 	if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
210 		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
211 			  &phys_addr, &last_addr);
212 		return NULL;
213 	}
214 
215 	/*
216 	 * Mappings have to be page-aligned
217 	 */
218 	offset = phys_addr & ~PAGE_MASK;
219 	phys_addr &= PAGE_MASK;
220 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
221 
222 	/*
223 	 * Mask out any bits not part of the actual physical
224 	 * address, like memory encryption bits.
225 	 */
226 	phys_addr &= PHYSICAL_PAGE_MASK;
227 
228 	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
229 						pcm, &new_pcm);
230 	if (retval) {
231 		printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
232 		return NULL;
233 	}
234 
235 	if (pcm != new_pcm) {
236 		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
237 			printk(KERN_ERR
238 		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
239 				(unsigned long long)phys_addr,
240 				(unsigned long long)(phys_addr + size),
241 				pcm, new_pcm);
242 			goto err_free_memtype;
243 		}
244 		pcm = new_pcm;
245 	}
246 
247 	/*
248 	 * If the page being mapped is in memory and SEV is active then
249 	 * make sure the memory encryption attribute is enabled in the
250 	 * resulting mapping.
251 	 */
252 	prot = PAGE_KERNEL_IO;
253 	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
254 		prot = pgprot_encrypted(prot);
255 
256 	switch (pcm) {
257 	case _PAGE_CACHE_MODE_UC:
258 	default:
259 		prot = __pgprot(pgprot_val(prot) |
260 				cachemode2protval(_PAGE_CACHE_MODE_UC));
261 		break;
262 	case _PAGE_CACHE_MODE_UC_MINUS:
263 		prot = __pgprot(pgprot_val(prot) |
264 				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
265 		break;
266 	case _PAGE_CACHE_MODE_WC:
267 		prot = __pgprot(pgprot_val(prot) |
268 				cachemode2protval(_PAGE_CACHE_MODE_WC));
269 		break;
270 	case _PAGE_CACHE_MODE_WT:
271 		prot = __pgprot(pgprot_val(prot) |
272 				cachemode2protval(_PAGE_CACHE_MODE_WT));
273 		break;
274 	case _PAGE_CACHE_MODE_WB:
275 		break;
276 	}
277 
278 	/*
279 	 * Ok, go for it..
280 	 */
281 	area = get_vm_area_caller(size, VM_IOREMAP, caller);
282 	if (!area)
283 		goto err_free_memtype;
284 	area->phys_addr = phys_addr;
285 	vaddr = (unsigned long) area->addr;
286 
287 	if (memtype_kernel_map_sync(phys_addr, size, pcm))
288 		goto err_free_area;
289 
290 	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
291 		goto err_free_area;
292 
293 	ret_addr = (void __iomem *) (vaddr + offset);
294 	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
295 
296 	/*
297 	 * Check if the request spans more than any BAR in the iomem resource
298 	 * tree.
299 	 */
300 	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
301 		pr_warn("caller %pS mapping multiple BARs\n", caller);
302 
303 	return ret_addr;
304 err_free_area:
305 	free_vm_area(area);
306 err_free_memtype:
307 	memtype_free(phys_addr, phys_addr + size);
308 	return NULL;
309 }
310 
311 /**
312  * ioremap     -   map bus memory into CPU space
313  * @phys_addr:    bus address of the memory
314  * @size:      size of the resource to map
315  *
316  * ioremap performs a platform specific sequence of operations to
317  * make bus memory CPU accessible via the readb/readw/readl/writeb/
318  * writew/writel functions and the other mmio helpers. The returned
319  * address is not guaranteed to be usable directly as a virtual
320  * address.
321  *
322  * This version of ioremap ensures that the memory is marked uncachable
323  * on the CPU as well as honouring existing caching rules from things like
324  * the PCI bus. Note that there are other caches and buffers on many
325  * busses. In particular, driver authors should read up on PCI writes.
326  *
327  * It's useful if some control registers are in such an area and
328  * write combining or read caching is not desirable:
329  *
330  * Must be freed with iounmap.
331  */
332 void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
333 {
334 	/*
335 	 * Ideally, this should be:
336 	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
337 	 *
338 	 * Till we fix all X drivers to use ioremap_wc(), we will use
339 	 * UC MINUS. Drivers that are certain they need or can already
340 	 * be converted over to strong UC can use ioremap_uc().
341 	 */
342 	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;
343 
344 	return __ioremap_caller(phys_addr, size, pcm,
345 				__builtin_return_address(0), false);
346 }
347 EXPORT_SYMBOL(ioremap);
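
/*
 * Illustrative sketch, not part of the kernel source: a typical PCI driver
 * pairs ioremap() with iounmap() around its mmio accesses. pdev, REG_CTRL
 * and CTRL_ENABLE below are placeholders, not real identifiers.
 *
 *	void __iomem *regs;
 *	u32 val;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *
 *	val = readl(regs + REG_CTRL);
 *	writel(val | CTRL_ENABLE, regs + REG_CTRL);
 *
 *	iounmap(regs);
 */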
348 
349 /**
350  * ioremap_uc     -   map bus memory into CPU space as strongly uncachable
351  * @phys_addr:    bus address of the memory
352  * @size:      size of the resource to map
353  *
354  * ioremap_uc performs a platform specific sequence of operations to
355  * make bus memory CPU accessible via the readb/readw/readl/writeb/
356  * writew/writel functions and the other mmio helpers. The returned
357  * address is not guaranteed to be usable directly as a virtual
358  * address.
359  *
360  * This version of ioremap ensures that the memory is marked with a strong
361  * preference as completely uncachable on the CPU when possible. For non-PAT
362  * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
363  * systems this will set the PAT entry for the pages as strong UC.  This call
364  * will honor existing caching rules from things like the PCI bus. Note that
365  * there are other caches and buffers on many busses. In particular driver
366  * authors should read up on PCI writes.
367  *
368  * It's useful if some control registers are in such an area and
369  * write combining or read caching is not desirable:
370  *
371  * Must be freed with iounmap.
372  */
373 void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
374 {
375 	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;
376 
377 	return __ioremap_caller(phys_addr, size, pcm,
378 				__builtin_return_address(0), false);
379 }
380 EXPORT_SYMBOL_GPL(ioremap_uc);
381 
382 /**
383  * ioremap_wc	-	map memory into CPU space write combined
384  * @phys_addr:	bus address of the memory
385  * @size:	size of the resource to map
386  *
387  * This version of ioremap ensures that the memory is marked write combining.
388  * Write combining allows faster writes to some hardware devices.
389  *
390  * Must be freed with iounmap.
391  */
392 void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
393 {
394 	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
395 					__builtin_return_address(0), false);
396 }
397 EXPORT_SYMBOL(ioremap_wc);
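
/*
 * Illustrative sketch, not part of the kernel source: write-combining is
 * typically used for large, write-mostly apertures such as a framebuffer.
 * fb_phys and fb_len stand in for a real device's aperture here.
 *
 *	void __iomem *fb;
 *
 *	fb = ioremap_wc(fb_phys, fb_len);
 *	if (!fb)
 *		return -ENOMEM;
 *
 *	memset_io(fb, 0, fb_len);
 *	...
 *	iounmap(fb);
 */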
398 
399 /**
400  * ioremap_wt	-	map memory into CPU space write through
401  * @phys_addr:	bus address of the memory
402  * @size:	size of the resource to map
403  *
404  * This version of ioremap ensures that the memory is marked write through.
405  * Write through stores data into memory while keeping the cache up-to-date.
406  *
407  * Must be freed with iounmap.
408  */
409 void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
410 {
411 	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
412 					__builtin_return_address(0), false);
413 }
414 EXPORT_SYMBOL(ioremap_wt);
415 
416 void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
417 {
418 	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
419 				__builtin_return_address(0), true);
420 }
421 EXPORT_SYMBOL(ioremap_encrypted);
422 
423 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
424 {
425 	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
426 				__builtin_return_address(0), false);
427 }
428 EXPORT_SYMBOL(ioremap_cache);
429 
430 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
431 				unsigned long prot_val)
432 {
433 	return __ioremap_caller(phys_addr, size,
434 				pgprot2cachemode(__pgprot(prot_val)),
435 				__builtin_return_address(0), false);
436 }
437 EXPORT_SYMBOL(ioremap_prot);
438 
439 /**
440  * iounmap - Free an IO remapping
441  * @addr: virtual address from ioremap_*
442  *
443  * Caller must ensure there is only one unmapping for the same pointer.
444  */
445 void iounmap(volatile void __iomem *addr)
446 {
447 	struct vm_struct *p, *o;
448 
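	/*
	 * Mappings returned by ioremap() live above high_memory; lower
	 * addresses are in the direct mapping and need no unmapping.
	 */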
449 	if ((void __force *)addr <= high_memory)
450 		return;
451 
452 	/*
453 	 * The PCI/ISA range special-casing was removed from __ioremap()
454 	 * so this check, in theory, can be removed. However, there are
455 	 * cases where iounmap() is called for addresses not obtained via
456 	 * ioremap() (vga16fb for example). Add a warning so that these
457 	 * cases can be caught and fixed.
458 	 */
459 	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
460 	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
461 		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
462 		return;
463 	}
464 
465 	mmiotrace_iounmap(addr);
466 
467 	addr = (volatile void __iomem *)
468 		(PAGE_MASK & (unsigned long __force)addr);
469 
470 	/* Use the vm area unlocked, assuming the caller
471 	   ensures there isn't another iounmap for the same address
472 	   in parallel. Reuse of the virtual address is prevented by
473 	   leaving it in the global lists until we're done with it.
474 	   cpa takes care of the direct mappings. */
475 	p = find_vm_area((void __force *)addr);
476 
477 	if (!p) {
478 		printk(KERN_ERR "iounmap: bad address %p\n", addr);
479 		dump_stack();
480 		return;
481 	}
482 
483 	memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));
484 
485 	/* Finally remove it */
486 	o = remove_vm_area((void __force *)addr);
487 	BUG_ON(p != o || o == NULL);
488 	kfree(p);
489 }
490 EXPORT_SYMBOL(iounmap);
491 
492 /*
493  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
494  * access
495  */
496 void *xlate_dev_mem_ptr(phys_addr_t phys)
497 {
498 	unsigned long start  = phys &  PAGE_MASK;
499 	unsigned long offset = phys & ~PAGE_MASK;
500 	void *vaddr;
501 
502 	/* memremap() maps if RAM, otherwise falls back to ioremap() */
503 	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);
504 
505 	/* Only add the offset on success and return NULL if memremap() failed */
506 	if (vaddr)
507 		vaddr += offset;
508 
509 	return vaddr;
510 }
511 
512 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
513 {
514 	memunmap((void *)((unsigned long)addr & PAGE_MASK));
515 }
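
/*
 * Illustrative sketch, not part of the kernel source: the /dev/mem read path
 * in drivers/char/mem.c brackets each chunk roughly like this (ptr, p, buf
 * and sz named as in that file):
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */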
516 
517 /*
518  * Examine the physical address to determine if it is an area of memory
519  * that should be mapped decrypted.  If the memory is not part of the
520  * kernel usable area it was accessed and created decrypted, so these
521  * areas should be mapped decrypted. And since the encryption key can
522  * change across reboots, persistent memory should also be mapped
523  * decrypted.
524  *
525  * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
526  * only persistent memory should be mapped decrypted.
527  */
528 static bool memremap_should_map_decrypted(resource_size_t phys_addr,
529 					  unsigned long size)
530 {
531 	int is_pmem;
532 
533 	/*
534 	 * Check if the address is part of a persistent memory region.
535 	 * This check covers areas added by E820, EFI and ACPI.
536 	 */
537 	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
538 				    IORES_DESC_PERSISTENT_MEMORY);
539 	if (is_pmem != REGION_DISJOINT)
540 		return true;
541 
542 	/*
543 	 * Check if the non-volatile attribute is set for an EFI
544 	 * reserved area.
545 	 */
546 	if (efi_enabled(EFI_BOOT)) {
547 		switch (efi_mem_type(phys_addr)) {
548 		case EFI_RESERVED_TYPE:
549 			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
550 				return true;
551 			break;
552 		default:
553 			break;
554 		}
555 	}
556 
557 	/* Check if the address is outside kernel usable area */
558 	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
559 	case E820_TYPE_RESERVED:
560 	case E820_TYPE_ACPI:
561 	case E820_TYPE_NVS:
562 	case E820_TYPE_UNUSABLE:
563 		/* For SEV, these areas are encrypted */
564 		if (sev_active())
565 			break;
566 		fallthrough;
567 
568 	case E820_TYPE_PRAM:
569 		return true;
570 	default:
571 		break;
572 	}
573 
574 	return false;
575 }
576 
577 /*
578  * Examine the physical address to determine if it is EFI data. Check
579  * it against the boot params structure and EFI tables and memory types.
580  */
581 static bool memremap_is_efi_data(resource_size_t phys_addr,
582 				 unsigned long size)
583 {
584 	u64 paddr;
585 
586 	/* Check if the address is part of EFI boot/runtime data */
587 	if (!efi_enabled(EFI_BOOT))
588 		return false;
589 
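	/* Reassemble the split hi/lo physical addresses from boot_params */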
590 	paddr = boot_params.efi_info.efi_memmap_hi;
591 	paddr <<= 32;
592 	paddr |= boot_params.efi_info.efi_memmap;
593 	if (phys_addr == paddr)
594 		return true;
595 
596 	paddr = boot_params.efi_info.efi_systab_hi;
597 	paddr <<= 32;
598 	paddr |= boot_params.efi_info.efi_systab;
599 	if (phys_addr == paddr)
600 		return true;
601 
602 	if (efi_is_table_address(phys_addr))
603 		return true;
604 
605 	switch (efi_mem_type(phys_addr)) {
606 	case EFI_BOOT_SERVICES_DATA:
607 	case EFI_RUNTIME_SERVICES_DATA:
608 		return true;
609 	default:
610 		break;
611 	}
612 
613 	return false;
614 }
615 
616 /*
617  * Examine the physical address to determine if it is boot data by checking
618  * it against the boot params setup_data chain.
619  */
620 static bool memremap_is_setup_data(resource_size_t phys_addr,
621 				   unsigned long size)
622 {
623 	struct setup_indirect *indirect;
624 	struct setup_data *data;
625 	u64 paddr, paddr_next;
626 
627 	paddr = boot_params.hdr.setup_data;
628 	while (paddr) {
629 		unsigned int len;
630 
631 		if (phys_addr == paddr)
632 			return true;
633 
634 		data = memremap(paddr, sizeof(*data),
635 				MEMREMAP_WB | MEMREMAP_DEC);
636 		if (!data) {
637 			pr_warn("failed to memremap setup_data entry\n");
638 			return false;
639 		}
640 
641 		paddr_next = data->next;
642 		len = data->len;
643 
644 		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
645 			memunmap(data);
646 			return true;
647 		}
648 
649 		if (data->type == SETUP_INDIRECT) {
650 			memunmap(data);
651 			data = memremap(paddr, sizeof(*data) + len,
652 					MEMREMAP_WB | MEMREMAP_DEC);
653 			if (!data) {
654 				pr_warn("failed to memremap indirect setup_data\n");
655 				return false;
656 			}
657 
658 			indirect = (struct setup_indirect *)data->data;
659 
660 			if (indirect->type != SETUP_INDIRECT) {
661 				paddr = indirect->addr;
662 				len = indirect->len;
663 			}
664 		}
665 
666 		memunmap(data);
667 
668 		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
669 			return true;
670 
671 		paddr = paddr_next;
672 	}
673 
674 	return false;
675 }
676 
677 /*
678  * Examine the physical address to determine if it is boot data by checking
679  * it against the boot params setup_data chain (early boot version).
680  */
681 static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
682 						unsigned long size)
683 {
684 	struct setup_indirect *indirect;
685 	struct setup_data *data;
686 	u64 paddr, paddr_next;
687 
688 	paddr = boot_params.hdr.setup_data;
689 	while (paddr) {
690 		unsigned int len, size;
691 
692 		if (phys_addr == paddr)
693 			return true;
694 
695 		data = early_memremap_decrypted(paddr, sizeof(*data));
696 		if (!data) {
697 			pr_warn("failed to early memremap setup_data entry\n");
698 			return false;
699 		}
700 
701 		size = sizeof(*data);
702 
703 		paddr_next = data->next;
704 		len = data->len;
705 
706 		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
707 			early_memunmap(data, sizeof(*data));
708 			return true;
709 		}
710 
711 		if (data->type == SETUP_INDIRECT) {
712 			size += len;
713 			early_memunmap(data, sizeof(*data));
714 			data = early_memremap_decrypted(paddr, size);
715 			if (!data) {
716 				pr_warn("failed to early memremap indirect setup_data\n");
717 				return false;
718 			}
719 
720 			indirect = (struct setup_indirect *)data->data;
721 
722 			if (indirect->type != SETUP_INDIRECT) {
723 				paddr = indirect->addr;
724 				len = indirect->len;
725 			}
726 		}
727 
728 		early_memunmap(data, size);
729 
730 		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
731 			return true;
732 
733 		paddr = paddr_next;
734 	}
735 
736 	return false;
737 }
738 
739 /*
740  * Architecture function to determine if RAM remap is allowed. By default, a
741  * RAM remap will map the data as encrypted. Determine if a RAM remap should
742  * not be done so that the data will be mapped decrypted.
743  */
744 bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
745 				 unsigned long flags)
746 {
747 	if (!mem_encrypt_active())
748 		return true;
749 
750 	if (flags & MEMREMAP_ENC)
751 		return true;
752 
753 	if (flags & MEMREMAP_DEC)
754 		return false;
755 
756 	if (sme_active()) {
757 		if (memremap_is_setup_data(phys_addr, size) ||
758 		    memremap_is_efi_data(phys_addr, size))
759 			return false;
760 	}
761 
762 	return !memremap_should_map_decrypted(phys_addr, size);
763 }
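
/*
 * Illustrative note: the generic memremap() consults this hook when remapping
 * RAM; a caller that needs a specific view can force it with MEMREMAP_ENC or
 * MEMREMAP_DEC, as this file itself does when reading setup_data:
 *
 *	data = memremap(paddr, sizeof(*data), MEMREMAP_WB | MEMREMAP_DEC);
 *	...
 *	memunmap(data);
 */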
764 
765 /*
766  * Architecture override of __weak function to adjust the protection attributes
767  * used when remapping memory. By default, early_memremap() will map the data
768  * as encrypted. Determine if an encrypted mapping should not be done and set
769  * the appropriate protection attributes.
770  */
771 pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
772 					     unsigned long size,
773 					     pgprot_t prot)
774 {
775 	bool encrypted_prot;
776 
777 	if (!mem_encrypt_active())
778 		return prot;
779 
780 	encrypted_prot = true;
781 
782 	if (sme_active()) {
783 		if (early_memremap_is_setup_data(phys_addr, size) ||
784 		    memremap_is_efi_data(phys_addr, size))
785 			encrypted_prot = false;
786 	}
787 
788 	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
789 		encrypted_prot = false;
790 
791 	return encrypted_prot ? pgprot_encrypted(prot)
792 			      : pgprot_decrypted(prot);
793 }
794 
795 bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
796 {
797 	return arch_memremap_can_ram_remap(phys_addr, size, 0);
798 }
799 
800 #ifdef CONFIG_AMD_MEM_ENCRYPT
801 /* Remap memory with encryption */
802 void __init *early_memremap_encrypted(resource_size_t phys_addr,
803 				      unsigned long size)
804 {
805 	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
806 }
807 
808 /*
809  * Remap memory with encryption and write-protected - cannot be called
810  * before pat_init() is called
811  */
812 void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
813 					 unsigned long size)
814 {
815 	if (!x86_has_pat_wp())
816 		return NULL;
817 	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
818 }
819 
820 /* Remap memory without encryption */
821 void __init *early_memremap_decrypted(resource_size_t phys_addr,
822 				      unsigned long size)
823 {
824 	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
825 }
826 
827 /*
828  * Remap memory without encryption and write-protected - cannot be called
829  * before pat_init() is called
830  */
831 void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
832 					 unsigned long size)
833 {
834 	if (!x86_has_pat_wp())
835 		return NULL;
836 	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
837 }
838 #endif	/* CONFIG_AMD_MEM_ENCRYPT */
839 
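/*
 * A single statically allocated page of PTEs backs the boot-time fixmap
 * slots (FIX_BTMAP_*) used by early_ioremap(); early_ioremap_init() below
 * hooks it into the page tables.
 */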
840 static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
841 
842 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
843 {
844 	/* Don't assume we're using swapper_pg_dir at this point */
845 	pgd_t *base = __va(read_cr3_pa());
846 	pgd_t *pgd = &base[pgd_index(addr)];
847 	p4d_t *p4d = p4d_offset(pgd, addr);
848 	pud_t *pud = pud_offset(p4d, addr);
849 	pmd_t *pmd = pmd_offset(pud, addr);
850 
851 	return pmd;
852 }
853 
854 static inline pte_t * __init early_ioremap_pte(unsigned long addr)
855 {
856 	return &bm_pte[pte_index(addr)];
857 }
858 
859 bool __init is_early_ioremap_ptep(pte_t *ptep)
860 {
861 	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
862 }
863 
864 void __init early_ioremap_init(void)
865 {
866 	pmd_t *pmd;
867 
868 #ifdef CONFIG_X86_64
869 	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
870 #else
871 	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
872 #endif
873 
874 	early_ioremap_setup();
875 
876 	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
877 	memset(bm_pte, 0, sizeof(bm_pte));
878 	pmd_populate_kernel(&init_mm, pmd, bm_pte);
879 
880 	/*
881 	 * The boot-ioremap range spans multiple pmds, for which
882 	 * we are not prepared:
883 	 */
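	/*
	 * On 32-bit, __FIXADDR_TOP is normally a variable; override it with
	 * a constant here so __fix_to_virt() folds to a compile-time value
	 * for the BUILD_BUG_ON() below.
	 */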
884 #define __FIXADDR_TOP (-PAGE_SIZE)
885 	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
886 		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
887 #undef __FIXADDR_TOP
888 	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
889 		WARN_ON(1);
890 		printk(KERN_WARNING "pmd %p != %p\n",
891 		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
892 		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
893 			fix_to_virt(FIX_BTMAP_BEGIN));
894 		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
895 			fix_to_virt(FIX_BTMAP_END));
896 
897 		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
898 		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
899 		       FIX_BTMAP_BEGIN);
900 	}
901 }
902 
903 void __init __early_set_fixmap(enum fixed_addresses idx,
904 			       phys_addr_t phys, pgprot_t flags)
905 {
906 	unsigned long addr = __fix_to_virt(idx);
907 	pte_t *pte;
908 
909 	if (idx >= __end_of_fixed_addresses) {
910 		BUG();
911 		return;
912 	}
913 	pte = early_ioremap_pte(addr);
914 
915 	/* Sanitize 'prot' against any unsupported bits: */
916 	pgprot_val(flags) &= __supported_pte_mask;
917 
918 	if (pgprot_val(flags))
919 		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
920 	else
921 		pte_clear(&init_mm, addr, pte);
922 	flush_tlb_one_kernel(addr);
923 }
924