// SPDX-License-Identifier: GPL-2.0-only
/*
 * EFI stub implementation that is shared by arm and arm64 architectures.
 * This should be #included by the EFI stub implementation files.
 *
 * Copyright (C) 2013,2014 Linaro Limited
 *     Roy Franz <roy.franz@linaro.org>
 * Copyright (C) 2013 Red Hat, Inc.
 *     Mark Salter <msalter@redhat.com>
 */

#include <linux/efi.h>
#include <asm/efi.h>

#include "efistub.h"

/*
 * This is the base address at which to start allocating virtual memory ranges
 * for UEFI Runtime Services.
 *
 * For ARM/ARM64:
 * This is in the low TTBR0 range so that we can use
 * any allocation we choose, and eliminate the risk of a conflict after kexec.
 * The value chosen is the largest non-zero power of 2 suitable for this purpose
 * both on 32-bit and 64-bit ARM CPUs, to maximize the likelihood that it can
 * be mapped efficiently.
 * Since 32-bit ARM could potentially execute with a 1G/3G user/kernel split,
 * map everything below 1 GB. (512 MB is a reasonable upper bound for the
 * entire footprint of the UEFI runtime services memory regions)
 *
 * For RISC-V:
 * There is no specific reason why this address (512 MB) can't be used as the
 * EFI runtime virtual address on RISC-V, and it allows EFI runtime services
 * to be used on both RV32 and RV64. Keep the same runtime virtual address for
 * RISC-V as well to minimize code churn.
 */
#define EFI_RT_VIRTUAL_BASE	SZ_512M
#define EFI_RT_VIRTUAL_SIZE	SZ_512M

#ifdef CONFIG_ARM64
# define EFI_RT_VIRTUAL_LIMIT	DEFAULT_MAP_WINDOW_64
#elif defined(CONFIG_RISCV) || defined(CONFIG_LOONGARCH)
# define EFI_RT_VIRTUAL_LIMIT	TASK_SIZE_MIN
#else /* Only if TASK_SIZE is a constant */
# define EFI_RT_VIRTUAL_LIMIT	TASK_SIZE
#endif

/*
 * Some architectures map the EFI regions into the kernel's linear map using a
 * fixed offset.
 */
#ifndef EFI_RT_VIRTUAL_OFFSET
#define EFI_RT_VIRTUAL_OFFSET	0
#endif

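/*
 * virtmap_base is the next virtual address handed out when assigning virtual
 * addresses to EFI runtime regions; flat_va_mapping selects a 1:1 (fixed
 * offset) mapping instead of packing the regions starting at virtmap_base.
 */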
static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
static bool flat_va_mapping = (EFI_RT_VIRTUAL_OFFSET != 0);

static struct screen_info *setup_graphics(void)
{
	efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
	efi_status_t status;
	unsigned long size;
	void **gop_handle = NULL;
	struct screen_info *si = NULL;

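	/*
	 * Probe for GRAPHICS_OUTPUT_PROTOCOL handles: with a zero-sized
	 * buffer, LocateHandle() returns EFI_BUFFER_TOO_SMALL if at least one
	 * such handle exists, which is all we need to know here.
	 */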
	size = 0;
	status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
			     &gop_proto, NULL, &size, gop_handle);
	if (status == EFI_BUFFER_TOO_SMALL) {
		si = alloc_screen_info();
		if (!si)
			return NULL;
		status = efi_setup_gop(si, &gop_proto, size);
		if (status != EFI_SUCCESS) {
			free_screen_info(si);
			return NULL;
		}
	}
	return si;
}

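/*
 * Install the LINUX_EFI_MEMRESERVE configuration table, which the kernel uses
 * to record memory reservations that need to persist across kexec.
 */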
static void install_memreserve_table(void)
{
	struct linux_efi_memreserve *rsv;
	efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
	efi_status_t status;

	status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
			     (void **)&rsv);
	if (status != EFI_SUCCESS) {
		efi_err("Failed to allocate memreserve entry!\n");
		return;
	}

	rsv->next = 0;
	rsv->size = 0;
	atomic_set(&rsv->count, 0);

	status = efi_bs_call(install_configuration_table,
			     &memreserve_table_guid, rsv);
	if (status != EFI_SUCCESS)
		efi_err("Failed to install memreserve config table!\n");
}

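/*
 * Start from the full set of runtime services and mask it with the
 * EFI_RT_PROPERTIES_TABLE, if the firmware provides one.
 */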
static u32 get_supported_rt_services(void)
{
	const efi_rt_properties_table_t *rt_prop_table;
	u32 supported = EFI_RT_SUPPORTED_ALL;

	rt_prop_table = get_efi_config_table(EFI_RT_PROPERTIES_TABLE_GUID);
	if (rt_prop_table)
		supported &= rt_prop_table->runtime_services_supported;

	return supported;
}

/*
 * EFI entry point for the arm/arm64 EFI stubs.  This is the entry point
 * that is described in the PE/COFF header.  Most of the code is the same
 * for both architectures, with the arch-specific code provided in the
 * handle_kernel_image() function.
 */
efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
				   efi_system_table_t *sys_table_arg)
{
	efi_loaded_image_t *image;
	efi_status_t status;
	unsigned long image_addr;
	unsigned long image_size = 0;
	/* addr/pointer and size pairs for memory management */
	char *cmdline_ptr = NULL;
	int cmdline_size = 0;
	efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID;
	unsigned long reserve_addr = 0;
	unsigned long reserve_size = 0;
	struct screen_info *si;
	efi_properties_table_t *prop_tbl;

	efi_system_table = sys_table_arg;

	/* Check if we were booted by the EFI firmware */
	if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		status = EFI_INVALID_PARAMETER;
		goto fail;
	}

	status = check_platform_features();
	if (status != EFI_SUCCESS)
		goto fail;

	/*
	 * Get a handle to the loaded image protocol.  This is used to get
	 * information about the running image, such as size and the command
	 * line.
	 */
	status = efi_bs_call(handle_protocol, handle, &loaded_image_proto,
			     (void *)&image);
	if (status != EFI_SUCCESS) {
		efi_err("Failed to get loaded image protocol\n");
		goto fail;
	}

	/*
	 * Get the command line from EFI, using the LOADED_IMAGE
	 * protocol. We are going to copy the command line into the
	 * device tree, so this can be allocated anywhere.
	 */
	cmdline_ptr = efi_convert_cmdline(image, &cmdline_size);
	if (!cmdline_ptr) {
		efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
		status = EFI_OUT_OF_RESOURCES;
		goto fail;
	}

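	/*
	 * Honour the built-in CONFIG_CMDLINE when it is meant to extend or
	 * override the firmware-provided command line, or when no command
	 * line was provided at all; unless CONFIG_CMDLINE_FORCE is set, also
	 * parse the command line we received above.
	 */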
	if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
	    IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
	    cmdline_size == 0) {
		status = efi_parse_options(CONFIG_CMDLINE);
		if (status != EFI_SUCCESS) {
			efi_err("Failed to parse options\n");
			goto fail_free_cmdline;
		}
	}

	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0) {
		status = efi_parse_options(cmdline_ptr);
		if (status != EFI_SUCCESS) {
			efi_err("Failed to parse options\n");
			goto fail_free_cmdline;
		}
	}

	efi_info("Booting Linux Kernel...\n");

	si = setup_graphics();

	status = handle_kernel_image(&image_addr, &image_size,
				     &reserve_addr,
				     &reserve_size,
				     image, handle);
	if (status != EFI_SUCCESS) {
		efi_err("Failed to relocate kernel\n");
		goto fail_free_screeninfo;
	}

	efi_retrieve_tpm2_eventlog();

	/* Ask the firmware to clear memory on unclean shutdown */
	efi_enable_reset_attack_mitigation();

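	/*
	 * Load the initrd, if any, whether it was supplied via the initrd=
	 * command line option or via the LoadFile2 initrd media protocol.
	 */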
	efi_load_initrd(image, ULONG_MAX, efi_get_max_initrd_addr(image_addr),
			NULL);

	efi_random_get_seed();

	/*
	 * If the NX PE data feature is enabled in the properties table, we
	 * should take care not to create a virtual mapping that changes the
	 * relative placement of runtime services code and data regions, as
	 * they may belong to the same PE/COFF executable image in memory.
	 * The easiest way to achieve that is to simply use a 1:1 mapping.
	 */
	prop_tbl = get_efi_config_table(EFI_PROPERTIES_TABLE_GUID);
	flat_va_mapping |= prop_tbl &&
			   (prop_tbl->memory_protection_attribute &
			   EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA);

	/* force efi_novamap if SetVirtualAddressMap() is unsupported */
	efi_novamap |= !(get_supported_rt_services() &
			 EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP);

	/* hibernation expects the runtime regions to stay in the same place */
	if (!IS_ENABLED(CONFIG_HIBERNATION) && !efi_nokaslr && !flat_va_mapping) {
		/*
		 * Randomize the base of the UEFI runtime services region.
		 * Preserve the 2 MB alignment of the region by taking a
		 * shift of 21 bit positions into account when scaling
		 * the headroom value using a 32-bit random value.
		 */
		static const u64 headroom = EFI_RT_VIRTUAL_LIMIT -
					    EFI_RT_VIRTUAL_BASE -
					    EFI_RT_VIRTUAL_SIZE;
		u32 rnd;

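		/*
		 * The offset below is roughly headroom * rnd / 2^32: the
		 * headroom is pre-scaled into 2 MB units so that its 64-bit
		 * product with the 32-bit random value cannot overflow.
		 */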
		status = efi_get_random_bytes(sizeof(rnd), (u8 *)&rnd);
		if (status == EFI_SUCCESS) {
			virtmap_base = EFI_RT_VIRTUAL_BASE +
				       (((headroom >> 21) * rnd) >> (32 - 21));
		}
	}

	install_memreserve_table();

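	/* efi_boot_kernel() only returns on failure; fall through to clean up. */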
	status = efi_boot_kernel(handle, image, image_addr, cmdline_ptr);

	efi_free(image_size, image_addr);
	efi_free(reserve_size, reserve_addr);
fail_free_screeninfo:
	free_screen_info(si);
fail_free_cmdline:
	efi_bs_call(free_pool, cmdline_ptr);
fail:
	return status;
}

/*
 * efi_alloc_virtmap() - create a pool allocation for the virtmap
 *
 * Create an allocation that is of sufficient size to hold all the memory
 * descriptors that will be passed to SetVirtualAddressMap() to inform the
 * firmware about the virtual mapping that will be used under the OS to call
 * into the firmware.
 */
efi_status_t efi_alloc_virtmap(efi_memory_desc_t **virtmap,
			       unsigned long *desc_size, u32 *desc_ver)
{
	unsigned long size, mmap_key;
	efi_status_t status;

	/*
	 * Use the size of the current memory map as an upper bound for the
	 * size of the buffer we need to pass to SetVirtualAddressMap() to
	 * cover all EFI_MEMORY_RUNTIME regions.
	 */
	size = 0;
	status = efi_bs_call(get_memory_map, &size, NULL, &mmap_key, desc_size,
			     desc_ver);
	if (status != EFI_BUFFER_TOO_SMALL)
		return EFI_LOAD_ERROR;

	return efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
			   (void **)virtmap);
}

/*
 * efi_get_virtmap() - create a virtual mapping for the EFI memory map
 *
 * This function populates the virt_addr fields of all memory region descriptors
 * in @memory_map whose EFI_MEMORY_RUNTIME attribute is set. Those descriptors
 * are also copied to @runtime_map, and their total count is returned in @count.
 */
void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
		     unsigned long desc_size, efi_memory_desc_t *runtime_map,
		     int *count)
{
	u64 efi_virt_base = virtmap_base;
	efi_memory_desc_t *in, *out = runtime_map;
	int l;

	*count = 0;

	for (l = 0; l < map_size; l += desc_size) {
		u64 paddr, size;

		in = (void *)memory_map + l;
		if (!(in->attribute & EFI_MEMORY_RUNTIME))
			continue;

		paddr = in->phys_addr;
		size = in->num_pages * EFI_PAGE_SIZE;

		in->virt_addr = in->phys_addr + EFI_RT_VIRTUAL_OFFSET;
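		/*
		 * With efi_novamap, leave virt_addr at the fixed offset and
		 * skip copying this descriptor into @runtime_map.
		 */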
		if (efi_novamap)
			continue;

		/*
		 * Make the mapping compatible with 64k pages: this allows
		 * a 4k page size kernel to kexec a 64k page size kernel and
		 * vice versa.
		 */
		if (!flat_va_mapping) {
			paddr = round_down(in->phys_addr, SZ_64K);
			size += in->phys_addr - paddr;

			/*
			 * Avoid wasting memory on PTEs by choosing a virtual
			 * base that is compatible with section mappings if this
			 * region has the appropriate size and physical
			 * alignment. (Sections are 2 MB on 4k granule kernels)
			 */
			if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
				efi_virt_base = round_up(efi_virt_base, SZ_2M);
			else
				efi_virt_base = round_up(efi_virt_base, SZ_64K);

			in->virt_addr += efi_virt_base - paddr;
			efi_virt_base += size;
		}

		memcpy(out, in, desc_size);
		out = (void *)out + desc_size;
		++*count;
	}
}