1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * efi.c - EFI subsystem
4  *
5  * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6  * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7  * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8  *
9  * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10  * allowing the efivarfs to be mounted or the efivars module to be loaded.
11  * The existence of /sys/firmware/efi may also be used by userspace to
12  * determine that the system supports EFI.
13  */
14 
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 
17 #include <linux/kobject.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/efi.h>
23 #include <linux/of.h>
24 #include <linux/io.h>
25 #include <linux/kexec.h>
26 #include <linux/platform_device.h>
27 #include <linux/random.h>
28 #include <linux/reboot.h>
29 #include <linux/slab.h>
30 #include <linux/acpi.h>
31 #include <linux/ucs2_string.h>
32 #include <linux/memblock.h>
33 #include <linux/security.h>
34 
35 #include <asm/early_ioremap.h>
36 
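/*
 * Global EFI state. Table addresses default to EFI_INVALID_TABLE_ADDR,
 * which the code below treats as "this table was not provided by the
 * firmware".
 */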
37 struct efi __read_mostly efi = {
38 	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
39 	.acpi			= EFI_INVALID_TABLE_ADDR,
40 	.acpi20			= EFI_INVALID_TABLE_ADDR,
41 	.smbios			= EFI_INVALID_TABLE_ADDR,
42 	.smbios3		= EFI_INVALID_TABLE_ADDR,
43 	.esrt			= EFI_INVALID_TABLE_ADDR,
44 	.tpm_log		= EFI_INVALID_TABLE_ADDR,
45 	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
46 #ifdef CONFIG_LOAD_UEFI_KEYS
47 	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
48 #endif
49 };
50 EXPORT_SYMBOL(efi);
51 
52 unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
53 static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
54 static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
55 
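/*
 * Dedicated mm_struct that holds the EFI runtime services mappings, so
 * that architectures can switch to a private set of page tables when
 * making runtime calls instead of borrowing a task's address space.
 */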
56 struct mm_struct efi_mm = {
57 	.mm_rb			= RB_ROOT,
58 	.mm_users		= ATOMIC_INIT(2),
59 	.mm_count		= ATOMIC_INIT(1),
60 	.write_protect_seq      = SEQCNT_ZERO(efi_mm.write_protect_seq),
61 	MMAP_LOCK_INITIALIZER(efi_mm)
62 	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
63 	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
64 	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
65 };
66 
67 struct workqueue_struct *efi_rts_wq;
68 
69 static bool disable_runtime;
70 static int __init setup_noefi(char *arg)
71 {
72 	disable_runtime = true;
73 	return 0;
74 }
75 early_param("noefi", setup_noefi);
76 
77 bool efi_runtime_disabled(void)
78 {
79 	return disable_runtime;
80 }
81 
82 bool __pure __efi_soft_reserve_enabled(void)
83 {
84 	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
85 }
86 
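/*
 * Handler for the "efi=" kernel command line option. Options may be
 * combined, e.g. "efi=debug,noruntime,nosoftreserve" sets the EFI_DBG
 * flag, disables runtime services and ignores soft-reserved memory.
 */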
87 static int __init parse_efi_cmdline(char *str)
88 {
89 	if (!str) {
90 		pr_warn("need at least one option\n");
91 		return -EINVAL;
92 	}
93 
94 	if (parse_option_str(str, "debug"))
95 		set_bit(EFI_DBG, &efi.flags);
96 
97 	if (parse_option_str(str, "noruntime"))
98 		disable_runtime = true;
99 
100 	if (parse_option_str(str, "nosoftreserve"))
101 		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
102 
103 	return 0;
104 }
105 early_param("efi", parse_efi_cmdline);
106 
107 struct kobject *efi_kobj;
108 
109 /*
110  * Let's not leave out systab information that snuck into
111  * the efivars driver
112  * Note: do not add more fields to the systab sysfs file, as that breaks
113  * the sysfs one-value-per-file rule!
114  */
115 static ssize_t systab_show(struct kobject *kobj,
116 			   struct kobj_attribute *attr, char *buf)
117 {
118 	char *str = buf;
119 
120 	if (!kobj || !buf)
121 		return -EINVAL;
122 
123 	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
124 		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
125 	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
126 		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
127 	/*
128 	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
129 	 * SMBIOS3 entry point shall be preferred, so we list it first to
130 	 * let applications stop parsing after the first match.
131 	 */
132 	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
133 		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
134 	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
135 		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
136 
137 	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
138 		str = efi_systab_show_arch(str);
139 
140 	return str - buf;
141 }
142 
143 static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
144 
145 static ssize_t fw_platform_size_show(struct kobject *kobj,
146 				     struct kobj_attribute *attr, char *buf)
147 {
148 	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
149 }
150 
151 extern __weak struct kobj_attribute efi_attr_fw_vendor;
152 extern __weak struct kobj_attribute efi_attr_runtime;
153 extern __weak struct kobj_attribute efi_attr_config_table;
154 static struct kobj_attribute efi_attr_fw_platform_size =
155 	__ATTR_RO(fw_platform_size);
156 
157 static struct attribute *efi_subsys_attrs[] = {
158 	&efi_attr_systab.attr,
159 	&efi_attr_fw_platform_size.attr,
160 	&efi_attr_fw_vendor.attr,
161 	&efi_attr_runtime.attr,
162 	&efi_attr_config_table.attr,
163 	NULL,
164 };
165 
166 umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
167 				   int n)
168 {
169 	return attr->mode;
170 }
171 
172 static const struct attribute_group efi_subsys_attr_group = {
173 	.attrs = efi_subsys_attrs,
174 	.is_visible = efi_attr_is_visible,
175 };
176 
177 static struct efivars generic_efivars;
178 static struct efivar_operations generic_ops;
179 
180 static int generic_ops_register(void)
181 {
182 	generic_ops.get_variable = efi.get_variable;
183 	generic_ops.get_next_variable = efi.get_next_variable;
184 	generic_ops.query_variable_store = efi_query_variable_store;
185 
186 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
187 		generic_ops.set_variable = efi.set_variable;
188 		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
189 	}
190 	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
191 }
192 
193 static void generic_ops_unregister(void)
194 {
195 	efivars_unregister(&generic_efivars);
196 }
197 
198 #ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
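/*
 * "efivar_ssdt=<name>" names an EFI variable whose contents are loaded
 * as an ACPI SSDT override during early boot (see efivar_ssdt_load()).
 */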
199 #define EFIVAR_SSDT_NAME_MAX	16
200 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
201 static int __init efivar_ssdt_setup(char *str)
202 {
203 	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
204 
205 	if (ret)
206 		return ret;
207 
208 	if (strlen(str) < sizeof(efivar_ssdt))
209 		memcpy(efivar_ssdt, str, strlen(str));
210 	else
211 		pr_warn("efivar_ssdt: name too long: %s\n", str);
212 	return 1;
213 }
214 __setup("efivar_ssdt=", efivar_ssdt_setup);
215 
216 static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
217 				   unsigned long name_size, void *data)
218 {
219 	struct efivar_entry *entry;
220 	struct list_head *list = data;
221 	char utf8_name[EFIVAR_SSDT_NAME_MAX];
222 	int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);
223 
224 	ucs2_as_utf8(utf8_name, name, limit - 1);
225 	if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
226 		return 0;
227 
228 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
229 	if (!entry)
230 		return 0;
231 
232 	memcpy(entry->var.VariableName, name, name_size);
233 	memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));
234 
235 	efivar_entry_add(entry, list);
236 
237 	return 0;
238 }
239 
240 static __init int efivar_ssdt_load(void)
241 {
242 	LIST_HEAD(entries);
243 	struct efivar_entry *entry, *aux;
244 	unsigned long size;
245 	void *data;
246 	int ret;
247 
248 	if (!efivar_ssdt[0])
249 		return 0;
250 
251 	ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
252 
253 	list_for_each_entry_safe(entry, aux, &entries, list) {
254 		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
255 			&entry->var.VendorGuid);
256 
257 		list_del(&entry->list);
258 
259 		ret = efivar_entry_size(entry, &size);
260 		if (ret) {
261 			pr_err("failed to get var size\n");
262 			goto free_entry;
263 		}
264 
265 		data = kmalloc(size, GFP_KERNEL);
266 		if (!data) {
267 			ret = -ENOMEM;
268 			goto free_entry;
269 		}
270 
271 		ret = efivar_entry_get(entry, NULL, &size, data);
272 		if (ret) {
273 			pr_err("failed to get var data\n");
274 			goto free_data;
275 		}
276 
277 		ret = acpi_load_table(data, NULL);
278 		if (ret) {
279 			pr_err("failed to load table: %d\n", ret);
280 			goto free_data;
281 		}
282 
283 		goto free_entry;
284 
285 free_data:
286 		kfree(data);
287 
288 free_entry:
289 		kfree(entry);
290 	}
291 
292 	return ret;
293 }
294 #else
295 static inline int efivar_ssdt_load(void) { return 0; }
296 #endif
297 
298 #ifdef CONFIG_DEBUG_FS
299 
300 #define EFI_DEBUGFS_MAX_BLOBS 32
301 
302 static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
303 
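/*
 * Expose the preserved EFI boot services code/data regions as blobs under
 * the "efi" debugfs directory; only invoked when the system was booted
 * with efi=debug and the boot services regions were kept around
 * (EFI_PRESERVE_BS_REGIONS), see efisubsys_init() below.
 */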
304 static void __init efi_debugfs_init(void)
305 {
306 	struct dentry *efi_debugfs;
307 	efi_memory_desc_t *md;
308 	char name[32];
309 	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
310 	int i = 0;
311 
312 	efi_debugfs = debugfs_create_dir("efi", NULL);
313 	if (IS_ERR_OR_NULL(efi_debugfs))
314 		return;
315 
316 	for_each_efi_memory_desc(md) {
317 		switch (md->type) {
318 		case EFI_BOOT_SERVICES_CODE:
319 			snprintf(name, sizeof(name), "boot_services_code%d",
320 				 type_count[md->type]++);
321 			break;
322 		case EFI_BOOT_SERVICES_DATA:
323 			snprintf(name, sizeof(name), "boot_services_data%d",
324 				 type_count[md->type]++);
325 			break;
326 		default:
327 			continue;
328 		}
329 
330 		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
331 			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
332 				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
333 			break;
334 		}
335 
336 		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
337 		debugfs_blob[i].data = memremap(md->phys_addr,
338 						debugfs_blob[i].size,
339 						MEMREMAP_WB);
340 		if (!debugfs_blob[i].data)
341 			continue;
342 
343 		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
344 		i++;
345 	}
346 }
347 #else
348 static inline void efi_debugfs_init(void) {}
349 #endif
350 
351 /*
352  * We register the efi subsystem with the firmware subsystem and the
353  * efivars subsystem with the efi subsystem, if the system was booted with
354  * EFI.
355  */
356 static int __init efisubsys_init(void)
357 {
358 	int error;
359 
360 	if (!efi_enabled(EFI_RUNTIME_SERVICES))
361 		efi.runtime_supported_mask = 0;
362 
363 	if (!efi_enabled(EFI_BOOT))
364 		return 0;
365 
366 	if (efi.runtime_supported_mask) {
367 		/*
368 		 * Since we process only one efi_runtime_service() at a time, an
369 		 * ordered workqueue (which creates only one execution context)
370 		 * should suffice for all our needs.
371 		 */
372 		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
373 		if (!efi_rts_wq) {
374 			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
375 			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
376 			efi.runtime_supported_mask = 0;
377 			return 0;
378 		}
379 	}
380 
381 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
382 		platform_device_register_simple("rtc-efi", 0, NULL, 0);
383 
384 	/* We register the efi directory at /sys/firmware/efi */
385 	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
386 	if (!efi_kobj) {
387 		pr_err("efi: Firmware registration failed.\n");
388 		error = -ENOMEM;
389 		goto err_destroy_wq;
390 	}
391 
392 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
393 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
394 		error = generic_ops_register();
395 		if (error)
396 			goto err_put;
397 		efivar_ssdt_load();
398 		platform_device_register_simple("efivars", 0, NULL, 0);
399 	}
400 
401 	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
402 	if (error) {
403 		pr_err("efi: Sysfs attribute export failed with error %d.\n",
404 		       error);
405 		goto err_unregister;
406 	}
407 
408 	error = efi_runtime_map_init(efi_kobj);
409 	if (error)
410 		goto err_remove_group;
411 
412 	/* and the standard mountpoint for efivarfs */
413 	error = sysfs_create_mount_point(efi_kobj, "efivars");
414 	if (error) {
415 		pr_err("efivars: Subsystem registration failed.\n");
416 		goto err_remove_group;
417 	}
418 
419 	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
420 		efi_debugfs_init();
421 
422 	return 0;
423 
424 err_remove_group:
425 	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
426 err_unregister:
427 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
428 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
429 		generic_ops_unregister();
430 err_put:
431 	kobject_put(efi_kobj);
432 err_destroy_wq:
433 	if (efi_rts_wq)
434 		destroy_workqueue(efi_rts_wq);
435 
436 	return error;
437 }
438 
439 subsys_initcall(efisubsys_init);
440 
441 /*
442  * Find the efi memory descriptor for a given physical address.  Given a
443  * physical address, determine if it exists within an EFI Memory Map entry,
444  * and if so, populate the supplied memory descriptor with the appropriate
445  * data.
446  */
447 int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
448 {
449 	efi_memory_desc_t *md;
450 
451 	if (!efi_enabled(EFI_MEMMAP)) {
452 		pr_err_once("EFI_MEMMAP is not enabled.\n");
453 		return -EINVAL;
454 	}
455 
456 	if (!out_md) {
457 		pr_err_once("out_md is null.\n");
458 		return -EINVAL;
459 	}
460 
461 	for_each_efi_memory_desc(md) {
462 		u64 size;
463 		u64 end;
464 
465 		size = md->num_pages << EFI_PAGE_SHIFT;
466 		end = md->phys_addr + size;
467 		if (phys_addr >= md->phys_addr && phys_addr < end) {
468 			memcpy(out_md, md, sizeof(*out_md));
469 			return 0;
470 		}
471 	}
472 	return -ENOENT;
473 }
474 
475 /*
476  * Calculate the highest address of an efi memory descriptor.
477  */
478 u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
479 {
480 	u64 size = md->num_pages << EFI_PAGE_SHIFT;
481 	u64 end = md->phys_addr + size;
482 	return end;
483 }
484 
485 void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
486 
487 /**
488  * efi_mem_reserve - Reserve an EFI memory region
489  * @addr: Physical address to reserve
490  * @size: Size of reservation
491  *
492  * Mark a region as reserved from general kernel allocation and
493  * prevent it being released by efi_free_boot_services().
494  *
495  * This function should be called by drivers once they've parsed EFI
496  * configuration tables to figure out where their data lives, e.g.
497  * efi_esrt_init().
498  */
499 void __init efi_mem_reserve(phys_addr_t addr, u64 size)
500 {
501 	if (!memblock_is_region_reserved(addr, size))
502 		memblock_reserve(addr, size);
503 
504 	/*
505 	 * Some architectures (x86) reserve all boot services ranges
506 	 * until efi_free_boot_services() because of buggy firmware
507 	 * implementations. This means the above memblock_reserve() is
508 	 * superfluous on x86 and instead what it needs to do is
509 	 * ensure the @start, @size is not freed.
510 	 */
511 	efi_arch_mem_reserve(addr, size);
512 }
513 
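/*
 * Configuration table GUIDs handled on every architecture: each entry maps
 * a GUID onto the variable that records the table's physical address, plus
 * an optional name to print at boot.
 */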
514 static const efi_config_table_type_t common_tables[] __initconst = {
515 	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
516 	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
517 	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
518 	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
519 	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
520 	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
521 	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
522 	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
523 	{LINUX_EFI_TPM_FINAL_LOG_GUID,		&efi.tpm_final_log,	"TPMFinalLog"	},
524 	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
525 	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
526 #ifdef CONFIG_EFI_RCI2_TABLE
527 	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
528 #endif
529 #ifdef CONFIG_LOAD_UEFI_KEYS
530 	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
531 #endif
532 	{},
533 };
534 
535 static __init int match_config_table(const efi_guid_t *guid,
536 				     unsigned long table,
537 				     const efi_config_table_type_t *table_types)
538 {
539 	int i;
540 
541 	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
542 		if (!efi_guidcmp(*guid, table_types[i].guid)) {
543 			*(table_types[i].ptr) = table;
544 			if (table_types[i].name[0])
545 				pr_cont("%s=0x%lx ",
546 					table_types[i].name, table);
547 			return 1;
548 		}
549 	}
550 
551 	return 0;
552 }
553 
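/*
 * Walk the firmware's configuration table array, record the address of
 * every table we recognise (common_tables plus any arch_tables), then
 * consume the Linux-specific tables: the RNG seed, the MEMRESERVE list
 * and the RT properties mask.
 */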
554 int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
555 				   int count,
556 				   const efi_config_table_type_t *arch_tables)
557 {
558 	const efi_config_table_64_t *tbl64 = (void *)config_tables;
559 	const efi_config_table_32_t *tbl32 = (void *)config_tables;
560 	const efi_guid_t *guid;
561 	unsigned long table;
562 	int i;
563 
564 	pr_info("");
565 	for (i = 0; i < count; i++) {
566 		if (!IS_ENABLED(CONFIG_X86)) {
567 			guid = &config_tables[i].guid;
568 			table = (unsigned long)config_tables[i].table;
569 		} else if (efi_enabled(EFI_64BIT)) {
570 			guid = &tbl64[i].guid;
571 			table = tbl64[i].table;
572 
573 			if (IS_ENABLED(CONFIG_X86_32) &&
574 			    tbl64[i].table > U32_MAX) {
575 				pr_cont("\n");
576 				pr_err("Table located above 4GB, disabling EFI.\n");
577 				return -EINVAL;
578 			}
579 		} else {
580 			guid = &tbl32[i].guid;
581 			table = tbl32[i].table;
582 		}
583 
584 		if (!match_config_table(guid, table, common_tables) && arch_tables)
585 			match_config_table(guid, table, arch_tables);
586 	}
587 	pr_cont("\n");
588 	set_bit(EFI_CONFIG_TABLES, &efi.flags);
589 
590 	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
591 		struct linux_efi_random_seed *seed;
592 		u32 size = 0;
593 
594 		seed = early_memremap(efi_rng_seed, sizeof(*seed));
595 		if (seed != NULL) {
596 			size = min_t(u32, seed->size, SZ_1K); // sanity check
597 			early_memunmap(seed, sizeof(*seed));
598 		} else {
599 			pr_err("Could not map UEFI random seed!\n");
600 		}
601 		if (size > 0) {
602 			seed = early_memremap(efi_rng_seed,
603 					      sizeof(*seed) + size);
604 			if (seed != NULL) {
605 				add_bootloader_randomness(seed->bits, size);
606 				memzero_explicit(seed->bits, size);
607 				early_memunmap(seed, sizeof(*seed) + size);
608 			} else {
609 				pr_err("Could not map UEFI random seed!\n");
610 			}
611 		}
612 	}
613 
614 	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
615 		efi_memattr_init();
616 
617 	efi_tpm_eventlog_init();
618 
619 	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
620 		unsigned long prsv = mem_reserve;
621 
622 		while (prsv) {
623 			struct linux_efi_memreserve *rsv;
624 			u8 *p;
625 
626 			/*
627 			 * Just map a full page: that is what we will get
628 			 * anyway, and it permits us to map the entire entry
629 			 * before knowing its size.
630 			 */
631 			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
632 					   PAGE_SIZE);
633 			if (p == NULL) {
634 				pr_err("Could not map UEFI memreserve entry!\n");
635 				return -ENOMEM;
636 			}
637 
638 			rsv = (void *)(p + prsv % PAGE_SIZE);
639 
640 			/* reserve the entry itself */
641 			memblock_reserve(prsv,
642 					 struct_size(rsv, entry, rsv->size));
643 
644 			for (i = 0; i < atomic_read(&rsv->count); i++) {
645 				memblock_reserve(rsv->entry[i].base,
646 						 rsv->entry[i].size);
647 			}
648 
649 			prsv = rsv->next;
650 			early_memunmap(p, PAGE_SIZE);
651 		}
652 	}
653 
654 	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
655 		efi_rt_properties_table_t *tbl;
656 
657 		tbl = early_memremap(rt_prop, sizeof(*tbl));
658 		if (tbl) {
659 			efi.runtime_supported_mask &= tbl->runtime_services_supported;
660 			early_memunmap(tbl, sizeof(*tbl));
661 		}
662 	}
663 
664 	return 0;
665 }
666 
667 int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
668 				   int min_major_version)
669 {
670 	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
671 		pr_err("System table signature incorrect!\n");
672 		return -EINVAL;
673 	}
674 
675 	if ((systab_hdr->revision >> 16) < min_major_version)
676 		pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
677 		       systab_hdr->revision >> 16,
678 		       systab_hdr->revision & 0xffff,
679 		       min_major_version);
680 
681 	return 0;
682 }
683 
684 #ifndef CONFIG_IA64
685 static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
686 						size_t size)
687 {
688 	const efi_char16_t *ret;
689 
690 	ret = early_memremap_ro(fw_vendor, size);
691 	if (!ret)
692 		pr_err("Could not map the firmware vendor!\n");
693 	return ret;
694 }
695 
696 static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
697 {
698 	early_memunmap((void *)fw_vendor, size);
699 }
700 #else
701 #define map_fw_vendor(p, s)	__va(p)
702 #define unmap_fw_vendor(v, s)
703 #endif
704 
705 void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
706 				     unsigned long fw_vendor)
707 {
708 	char vendor[100] = "unknown";
709 	const efi_char16_t *c16;
710 	size_t i;
711 
712 	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
713 	if (c16) {
714 		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
715 			vendor[i] = c16[i];
716 		vendor[i] = '\0';
717 
718 		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
719 	}
720 
721 	pr_info("EFI v%u.%.02u by %s\n",
722 		systab_hdr->revision >> 16,
723 		systab_hdr->revision & 0xffff,
724 		vendor);
725 
726 	if (IS_ENABLED(CONFIG_X86_64) &&
727 	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
728 	    !strcmp(vendor, "Apple")) {
729 		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
730 		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
731 	}
732 }
733 
734 static __initdata char memory_type_name[][13] = {
735 	"Reserved",
736 	"Loader Code",
737 	"Loader Data",
738 	"Boot Code",
739 	"Boot Data",
740 	"Runtime Code",
741 	"Runtime Data",
742 	"Conventional",
743 	"Unusable",
744 	"ACPI Reclaim",
745 	"ACPI Mem NVS",
746 	"MMIO",
747 	"MMIO Port",
748 	"PAL Code",
749 	"Persistent",
750 };
751 
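/*
 * Format the type name and attribute bits of a memory descriptor into
 * @buf, roughly "[Runtime Data |RUN| ... |WB|WT|WC|UC]", for boot-time
 * memory map dumps.
 */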
752 char * __init efi_md_typeattr_format(char *buf, size_t size,
753 				     const efi_memory_desc_t *md)
754 {
755 	char *pos;
756 	int type_len;
757 	u64 attr;
758 
759 	pos = buf;
760 	if (md->type >= ARRAY_SIZE(memory_type_name))
761 		type_len = snprintf(pos, size, "[type=%u", md->type);
762 	else
763 		type_len = snprintf(pos, size, "[%-*s",
764 				    (int)(sizeof(memory_type_name[0]) - 1),
765 				    memory_type_name[md->type]);
766 	if (type_len >= size)
767 		return buf;
768 
769 	pos += type_len;
770 	size -= type_len;
771 
772 	attr = md->attribute;
773 	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
774 		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
775 		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
776 		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
777 		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
778 		snprintf(pos, size, "|attr=0x%016llx]",
779 			 (unsigned long long)attr);
780 	else
781 		snprintf(pos, size,
782 			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
783 			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
784 			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
785 			 attr & EFI_MEMORY_CPU_CRYPTO   	? "CC"  : "",
786 			 attr & EFI_MEMORY_SP			? "SP"  : "",
787 			 attr & EFI_MEMORY_NV			? "NV"  : "",
788 			 attr & EFI_MEMORY_XP			? "XP"  : "",
789 			 attr & EFI_MEMORY_RP			? "RP"  : "",
790 			 attr & EFI_MEMORY_WP			? "WP"  : "",
791 			 attr & EFI_MEMORY_RO			? "RO"  : "",
792 			 attr & EFI_MEMORY_UCE			? "UCE" : "",
793 			 attr & EFI_MEMORY_WB			? "WB"  : "",
794 			 attr & EFI_MEMORY_WT			? "WT"  : "",
795 			 attr & EFI_MEMORY_WC			? "WC"  : "",
796 			 attr & EFI_MEMORY_UC			? "UC"  : "");
797 	return buf;
798 }
799 
800 /*
801  * IA64 has a funky EFI memory map that doesn't work the same way as
802  * other architectures.
803  */
804 #ifndef CONFIG_IA64
805 /*
806  * efi_mem_attributes - lookup memmap attributes for physical address
807  * @phys_addr: the physical address to lookup
808  *
809  * Search in the EFI memory map for the region covering
810  * @phys_addr. Returns the EFI memory attributes if the region
811  * was found in the memory map, 0 otherwise.
812  */
813 u64 efi_mem_attributes(unsigned long phys_addr)
814 {
815 	efi_memory_desc_t *md;
816 
817 	if (!efi_enabled(EFI_MEMMAP))
818 		return 0;
819 
820 	for_each_efi_memory_desc(md) {
821 		if ((md->phys_addr <= phys_addr) &&
822 		    (phys_addr < (md->phys_addr +
823 		    (md->num_pages << EFI_PAGE_SHIFT))))
824 			return md->attribute;
825 	}
826 	return 0;
827 }
828 
829 /*
830  * efi_mem_type - lookup memmap type for physical address
831  * @phys_addr: the physical address to lookup
832  *
833  * Search in the EFI memory map for the region covering @phys_addr.
834  * Returns the EFI memory type if the region was found in the memory
835  * map, -EINVAL otherwise.
836  */
837 int efi_mem_type(unsigned long phys_addr)
838 {
839 	const efi_memory_desc_t *md;
840 
841 	if (!efi_enabled(EFI_MEMMAP))
842 		return -ENOTSUPP;
843 
844 	for_each_efi_memory_desc(md) {
845 		if ((md->phys_addr <= phys_addr) &&
846 		    (phys_addr < (md->phys_addr +
847 				  (md->num_pages << EFI_PAGE_SHIFT))))
848 			return md->type;
849 	}
850 	return -EINVAL;
851 }
852 #endif
853 
854 int efi_status_to_err(efi_status_t status)
855 {
856 	int err;
857 
858 	switch (status) {
859 	case EFI_SUCCESS:
860 		err = 0;
861 		break;
862 	case EFI_INVALID_PARAMETER:
863 		err = -EINVAL;
864 		break;
865 	case EFI_OUT_OF_RESOURCES:
866 		err = -ENOSPC;
867 		break;
868 	case EFI_DEVICE_ERROR:
869 		err = -EIO;
870 		break;
871 	case EFI_WRITE_PROTECTED:
872 		err = -EROFS;
873 		break;
874 	case EFI_SECURITY_VIOLATION:
875 		err = -EACCES;
876 		break;
877 	case EFI_NOT_FOUND:
878 		err = -ENOENT;
879 		break;
880 	case EFI_ABORTED:
881 		err = -EINTR;
882 		break;
883 	default:
884 		err = -EINVAL;
885 	}
886 
887 	return err;
888 }
889 
890 static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
891 static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
892 
893 static int __init efi_memreserve_map_root(void)
894 {
895 	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
896 		return -ENODEV;
897 
898 	efi_memreserve_root = memremap(mem_reserve,
899 				       sizeof(*efi_memreserve_root),
900 				       MEMREMAP_WB);
901 	if (WARN_ON_ONCE(!efi_memreserve_root))
902 		return -ENOMEM;
903 	return 0;
904 }
905 
906 static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
907 {
908 	struct resource *res, *parent;
909 	int ret;
910 
911 	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
912 	if (!res)
913 		return -ENOMEM;
914 
915 	res->name	= "reserved";
916 	res->flags	= IORESOURCE_MEM;
917 	res->start	= addr;
918 	res->end	= addr + size - 1;
919 
920 	/* we expect a conflict with a 'System RAM' region */
921 	parent = request_resource_conflict(&iomem_resource, res);
922 	ret = parent ? request_resource(parent, res) : 0;
923 
924 	/*
925 	 * Given that efi_mem_reserve_iomem() can be called at any
926 	 * time, only call memblock_reserve() if the architecture
927 	 * keeps the infrastructure around.
928 	 */
929 	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
930 		memblock_reserve(addr, size);
931 
932 	return ret;
933 }
934 
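/*
 * Record a reservation in the LINUX_EFI_MEMRESERVE_TABLE linked list, in
 * addition to reserving it in the current kernel, so that a subsequent
 * kernel that parses the MEMRESERVE table (e.g. after kexec) honours it
 * as well.
 */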
935 int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
936 {
937 	struct linux_efi_memreserve *rsv;
938 	unsigned long prsv;
939 	int rc, index;
940 
941 	if (efi_memreserve_root == (void *)ULONG_MAX)
942 		return -ENODEV;
943 
944 	if (!efi_memreserve_root) {
945 		rc = efi_memreserve_map_root();
946 		if (rc)
947 			return rc;
948 	}
949 
950 	/* first try to find a slot in an existing linked list entry */
951 	for (prsv = efi_memreserve_root->next; prsv; ) {
952 		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
953 		if (!rsv)
954 			return -ENOMEM;
955 		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
956 		if (index < rsv->size) {
957 			rsv->entry[index].base = addr;
958 			rsv->entry[index].size = size;
959 
960 			memunmap(rsv);
961 			return efi_mem_reserve_iomem(addr, size);
962 		}
963 		prsv = rsv->next;
964 		memunmap(rsv);
965 	}
966 
967 	/* no slot found - allocate a new linked list entry */
968 	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
969 	if (!rsv)
970 		return -ENOMEM;
971 
972 	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
973 	if (rc) {
974 		free_page((unsigned long)rsv);
975 		return rc;
976 	}
977 
978 	/*
979 	 * The memremap() call above assumes that a linux_efi_memreserve entry
980 	 * never crosses a page boundary, so let's ensure that this remains true
981 	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
982 	 * using SZ_4K explicitly in the size calculation below.
983 	 */
984 	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
985 	atomic_set(&rsv->count, 1);
986 	rsv->entry[0].base = addr;
987 	rsv->entry[0].size = size;
988 
989 	spin_lock(&efi_mem_reserve_persistent_lock);
990 	rsv->next = efi_memreserve_root->next;
991 	efi_memreserve_root->next = __pa(rsv);
992 	spin_unlock(&efi_mem_reserve_persistent_lock);
993 
994 	return efi_mem_reserve_iomem(addr, size);
995 }
996 
997 static int __init efi_memreserve_root_init(void)
998 {
999 	if (efi_memreserve_root)
1000 		return 0;
1001 	if (efi_memreserve_map_root())
1002 		efi_memreserve_root = (void *)ULONG_MAX;
1003 	return 0;
1004 }
1005 early_initcall(efi_memreserve_root_init);
1006 
1007 #ifdef CONFIG_KEXEC
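/*
 * Reboot notifier: when a kexec is in progress, refill the RNG seed table
 * with fresh entropy so the next kernel does not boot with a reused seed.
 */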
1008 static int update_efi_random_seed(struct notifier_block *nb,
1009 				  unsigned long code, void *unused)
1010 {
1011 	struct linux_efi_random_seed *seed;
1012 	u32 size = 0;
1013 
1014 	if (!kexec_in_progress)
1015 		return NOTIFY_DONE;
1016 
1017 	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
1018 	if (seed != NULL) {
1019 		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
1020 		memunmap(seed);
1021 	} else {
1022 		pr_err("Could not map UEFI random seed!\n");
1023 	}
1024 	if (size > 0) {
1025 		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
1026 				MEMREMAP_WB);
1027 		if (seed != NULL) {
1028 			seed->size = size;
1029 			get_random_bytes(seed->bits, seed->size);
1030 			memunmap(seed);
1031 		} else {
1032 			pr_err("Could not map UEFI random seed!\n");
1033 		}
1034 	}
1035 	return NOTIFY_DONE;
1036 }
1037 
1038 static struct notifier_block efi_random_seed_nb = {
1039 	.notifier_call = update_efi_random_seed,
1040 };
1041 
1042 static int __init register_update_efi_random_seed(void)
1043 {
1044 	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
1045 		return 0;
1046 	return register_reboot_notifier(&efi_random_seed_nb);
1047 }
1048 late_initcall(register_update_efi_random_seed);
1049 #endif
1050