// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#define DISABLE_BRANCH_PROFILING

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/cc_platform.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>

#include "mm_internal.h"

/*
 * Since SME related variables are set early in the boot process they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
u64 sme_me_mask __section(".data") = 0;
u64 sev_status __section(".data") = 0;
u64 sev_check_data __section(".data") = 0;
EXPORT_SYMBOL(sme_me_mask);
DEFINE_STATIC_KEY_FALSE(sev_enable_key);
EXPORT_SYMBOL_GPL(sev_enable_key);

bool sev_enabled __section(".data");

/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);

/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *src, *dst;
	size_t len;

	if (!sme_me_mask)
		return;

	wbinvd();

	/*
	 * There are a limited number of early mapping slots, so map (at most)
	 * one page at a time.
	 */
	while (size) {
		len = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Create mappings for the current and desired format of
		 * the memory. Use a write-protected mapping for the source.
		 */
		src = enc ? early_memremap_decrypted_wp(paddr, len) :
			    early_memremap_encrypted_wp(paddr, len);

		dst = enc ? early_memremap_encrypted(paddr, len) :
			    early_memremap_decrypted(paddr, len);

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!src || !dst);

		/*
		 * Use a temporary buffer, of cache-line multiple size, to
		 * avoid data corruption as documented in the APM.
		 */
		memcpy(sme_early_buffer, src, len);
		memcpy(dst, sme_early_buffer, len);

		early_memunmap(dst, len);
		early_memunmap(src, len);

		paddr += len;
		size -= len;
	}
}

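/*
 * Encrypt or decrypt a physical memory range in place during early boot;
 * thin wrappers around __sme_early_enc_dec() above.
 */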
void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, true);
}

void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, false);
}

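/*
 * Map or unmap a virtual range using the early page table helpers.  The
 * encryption mask is cleared from the PMD flags so that boot data placed
 * in memory unencrypted by the boot loader can be accessed correctly
 * while SME is active.
 */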
static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
					     bool map)
{
	unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
	pmdval_t pmd_flags, pmd;

	/* Use early_pmd_flags but remove the encryption mask */
	pmd_flags = __sme_clr(early_pmd_flags);

	do {
		pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
		__early_make_pgtable((unsigned long)vaddr, pmd);

		vaddr += PMD_SIZE;
		paddr += PMD_SIZE;
		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
	} while (size);

	flush_tlb_local();
}

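/*
 * Remove the decrypted mapping of the real-mode boot data and of the
 * kernel command line it points to, once they are no longer needed.
 */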
void __init sme_unmap_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	/* Get the command line address before unmapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}

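/*
 * Create a decrypted mapping of the real-mode boot data and of the kernel
 * command line it points to, so they can be read while SME is active.
 */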
void __init sme_map_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

	/* Get the command line address after mapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}

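/*
 * Apply the SME encryption mask to the early page table flags, the kernel
 * protection map and the supported PTE mask, and force use of the SWIOTLB
 * bounce buffers when running as an SEV guest.
 */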
void __init sme_early_init(void)
{
	unsigned int i;

	if (!sme_me_mask)
		return;

	early_pmd_flags = __sme_set(early_pmd_flags);

	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* Update the protection map with memory encryption mask */
	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
		protection_map[i] = pgprot_encrypted(protection_map[i]);

	if (sev_active())
		swiotlb_force = SWIOTLB_FORCE;
}

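/*
 * Set or clear the encryption bit of a single page table entry (4K, 2M or
 * 1G), encrypting or decrypting the mapped memory in place so that its
 * contents remain valid under the new C-bit setting.
 */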
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
{
	pgprot_t old_prot, new_prot;
	unsigned long pfn, pa, size;
	pte_t new_pte;

	switch (level) {
	case PG_LEVEL_4K:
		pfn = pte_pfn(*kpte);
		old_prot = pte_pgprot(*kpte);
		break;
	case PG_LEVEL_2M:
		pfn = pmd_pfn(*(pmd_t *)kpte);
		old_prot = pmd_pgprot(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		pfn = pud_pfn(*(pud_t *)kpte);
		old_prot = pud_pgprot(*(pud_t *)kpte);
		break;
	default:
		return;
	}

	new_prot = old_prot;
	if (enc)
		pgprot_val(new_prot) |= _PAGE_ENC;
	else
		pgprot_val(new_prot) &= ~_PAGE_ENC;

	/* If the protection is unchanged, there is nothing to do. */
	if (pgprot_val(old_prot) == pgprot_val(new_prot))
		return;

	pa = pfn << PAGE_SHIFT;
	size = page_level_size(level);

	/*
	 * We are going to perform in-place en-/decryption and change the
	 * physical page attribute from C=1 to C=0 or vice versa. Flush the
	 * caches to ensure that data gets accessed with the correct C-bit.
	 */
	clflush_cache_range(__va(pa), size);

	/* Encrypt/decrypt the contents in-place */
	if (enc)
		sme_early_encrypt(pa, size);
	else
		sme_early_decrypt(pa, size);

	/* Change the page encryption mask. */
	new_pte = pfn_pte(pfn, new_prot);
	set_pte_atomic(kpte, new_pte);
}

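/*
 * Walk the kernel page tables for the given virtual range and set or clear
 * the encryption attribute of each mapping, splitting large pages when the
 * range does not cover them completely.  Returns 0 on success and 1 if a
 * mapping could not be found.
 */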
static int __init early_set_memory_enc_dec(unsigned long vaddr,
					   unsigned long size, bool enc)
{
	unsigned long vaddr_end, vaddr_next;
	unsigned long psize, pmask;
	int split_page_size_mask;
	int level, ret;
	pte_t *kpte;

	vaddr_next = vaddr;
	vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			ret = 1;
			goto out;
		}

		if (level == PG_LEVEL_4K) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
			continue;
		}

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		/*
		 * Check whether we can change the large page in one go.
		 * We request a split when the address is not aligned and
		 * the number of pages to set/clear the encryption bit on
		 * is smaller than the number of pages in the large page.
		 */
		if (vaddr == (vaddr & pmask) &&
		    ((vaddr_end - vaddr) >= psize)) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & pmask) + psize;
			continue;
		}

		/*
		 * The virtual address is part of a larger page, so create the
		 * next level page table mapping (4K or 2M). If it is part of
		 * a 2M page then we request a split of the large page into 4K
		 * chunks. A 1GB large page is split into 2M pages instead.
		 */
		if (level == PG_LEVEL_2M)
			split_page_size_mask = 0;
		else
			split_page_size_mask = 1 << PG_LEVEL_2M;

		/*
		 * kernel_physical_mapping_change() does not flush the TLBs, so
		 * a TLB flush is required after we exit from the for loop.
		 */
		kernel_physical_mapping_change(__pa(vaddr & pmask),
					       __pa((vaddr_end & pmask) + psize),
					       split_page_size_mask);
	}

	ret = 0;

out:
	__flush_tlb_all();
	return ret;
}

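/*
 * Early boot-time wrappers to mark a virtual range as decrypted or
 * encrypted before the regular set_memory_*() interfaces are usable.
 */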
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, false);
}

int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, true);
}

/*
 * SME and SEV are very similar but they are not the same, so there are
 * times that the kernel will need to distinguish between SME and SEV. The
 * sme_active() and sev_active() functions are used for this.  When a
 * distinction isn't needed, the mem_encrypt_active() function can be used.
 *
 * The trampoline code is a good example for this requirement.  Before
 * paging is activated, SME will access all memory as decrypted, but SEV
 * will access all memory as encrypted.  So, when APs are being brought
 * up under SME the trampoline area cannot be encrypted, whereas under SEV
 * the trampoline area must be encrypted.
 */
bool sme_active(void)
{
	return sme_me_mask && !sev_enabled;
}

bool sev_active(void)
{
	return sev_status & MSR_AMD64_SEV_ENABLED;
}
EXPORT_SYMBOL_GPL(sev_active);

/* Needs to be called from non-instrumentable code */
bool noinstr sev_es_active(void)
{
	return sev_status & MSR_AMD64_SEV_ES_ENABLED;
}

/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
{
	/*
	 * For SEV, all DMA must be to unencrypted addresses.
	 */
	if (sev_active())
		return true;

	/*
	 * For SME, all DMA must be to unencrypted addresses if the
	 * device does not support DMA to addresses that include the
	 * encryption mask.
	 */
	if (sme_active()) {
		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
						dev->bus_dma_limit);

		if (dma_dev_mask <= dma_enc_mask)
			return true;
	}

	return false;
}

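/*
 * Free the unused part of the .bss..decrypted section, changing its
 * encryption attribute back to encrypted first so that no freed pages are
 * left mapped decrypted.
 */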
void __init mem_encrypt_free_decrypted_mem(void)
{
	unsigned long vaddr, vaddr_end, npages;
	int r;

	vaddr = (unsigned long)__start_bss_decrypted_unused;
	vaddr_end = (unsigned long)__end_bss_decrypted;
	npages = (vaddr_end - vaddr) >> PAGE_SHIFT;

	/*
	 * The unused memory range was mapped decrypted, change the encryption
	 * attribute from decrypted to encrypted before freeing it.
	 */
	if (mem_encrypt_active()) {
		r = set_memory_encrypted(vaddr, npages);
		if (r) {
			pr_warn("failed to free unused decrypted pages\n");
			return;
		}
	}

	free_init_pages("unused decrypted", vaddr, vaddr_end);
}

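/* Report which AMD memory encryption features are active. */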
static void print_mem_encrypt_feature_info(void)
{
	pr_info("AMD Memory Encryption Features active:");

	/* Secure Memory Encryption */
	if (sme_active()) {
		/*
		 * SME is mutually exclusive with any of the SEV
		 * features below.
		 */
		pr_cont(" SME\n");
		return;
	}

	/* Secure Encrypted Virtualization */
	if (sev_active())
		pr_cont(" SEV");

	/* Encrypted Register State */
	if (sev_es_active())
		pr_cont(" SEV-ES");

	pr_cont("\n");
}

/* Architecture __weak replacement functions */
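/*
 * Update the SWIOTLB DMA buffers for SME/SEV, enable the static key used
 * to unroll rep string I/O under SEV, and report the active features.
 */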
void __init mem_encrypt_init(void)
{
	if (!sme_me_mask)
		return;

	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
	swiotlb_update_mem_attributes();

	/*
	 * With SEV, we need to unroll the rep string I/O instructions.
	 */
	if (sev_active())
		static_branch_enable(&sev_enable_key);

	print_mem_encrypt_feature_info();
}