// SPDX-License-Identifier: GPL-2.0-only
/*
 * efi.c - EFI subsystem
 *
 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
 *
 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
 * determine that the system supports EFI.
 */
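
/*
 * For a rough sense of how userspace consumes this (illustrative sketch, not
 * part of the kernel itself): the directory's existence can be tested and
 * efivarfs mounted on the mount point created below, e.g.
 *
 *	if (access("/sys/firmware/efi", F_OK) == 0)
 *		mount("none", "/sys/firmware/efi/efivars", "efivarfs", 0, NULL);
 */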

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
#include <linux/memblock.h>
#include <linux/security.h>

#include <asm/early_ioremap.h>

struct efi __read_mostly efi = {
	.runtime_supported_mask	= EFI_RT_SUPPORTED_ALL,
	.acpi			= EFI_INVALID_TABLE_ADDR,
	.acpi20			= EFI_INVALID_TABLE_ADDR,
	.smbios			= EFI_INVALID_TABLE_ADDR,
	.smbios3		= EFI_INVALID_TABLE_ADDR,
	.esrt			= EFI_INVALID_TABLE_ADDR,
	.tpm_log		= EFI_INVALID_TABLE_ADDR,
	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
#endif
};
EXPORT_SYMBOL(efi);

unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;

struct mm_struct efi_mm = {
	.mm_rb			= RB_ROOT,
	.mm_users		= ATOMIC_INIT(2),
	.mm_count		= ATOMIC_INIT(1),
	.write_protect_seq	= SEQCNT_ZERO(efi_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(efi_mm)
	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
};

struct workqueue_struct *efi_rts_wq;

static bool disable_runtime;
static int __init setup_noefi(char *arg)
{
	disable_runtime = true;
	return 0;
}
early_param("noefi", setup_noefi);

bool efi_runtime_disabled(void)
{
	return disable_runtime;
}

bool __pure __efi_soft_reserve_enabled(void)
{
	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}

static int __init parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	if (parse_option_str(str, "debug"))
		set_bit(EFI_DBG, &efi.flags);

	if (parse_option_str(str, "noruntime"))
		disable_runtime = true;

	if (parse_option_str(str, "nosoftreserve"))
		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);

	return 0;
}
early_param("efi", parse_efi_cmdline);

struct kobject *efi_kobj;

/*
 * Let's not leave out systab information that snuck into
 * the efivars driver
 * Note, do not add more fields in systab sysfs file as it breaks sysfs
 * one value per file rule!
 */
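/*
 * As a rough illustration of the format produced by systab_show() below
 * (the addresses are made up), reading /sys/firmware/efi/systab on an
 * ACPI/SMBIOS3 system yields output of the form:
 *
 *	ACPI20=0x7f77e014
 *	ACPI=0x7f77e000
 *	SMBIOS3=0x7f2ce000
 *	SMBIOS=0x7f2cd000
 */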
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	char *str = buf;

	if (!kobj || !buf)
		return -EINVAL;

	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
		str = efi_systab_show_arch(str);

	return str - buf;
}

static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);

static ssize_t fw_platform_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}

extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
	__ATTR_RO(fw_platform_size);

static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	&efi_attr_fw_platform_size.attr,
	&efi_attr_fw_vendor.attr,
	&efi_attr_runtime.attr,
	&efi_attr_config_table.attr,
	NULL,
};

umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
				   int n)
{
	return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
	.is_visible = efi_attr_is_visible,
};

static struct efivars generic_efivars;
static struct efivar_operations generic_ops;

static int generic_ops_register(void)
{
	generic_ops.get_variable = efi.get_variable;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.query_variable_store = efi_query_variable_store;

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
		generic_ops.set_variable = efi.set_variable;
		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	}
	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
}

static void generic_ops_unregister(void)
{
	efivars_unregister(&generic_efivars);
}

#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX 16
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
{
	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

	if (ret)
		return ret;

	if (strlen(str) < sizeof(efivar_ssdt))
		memcpy(efivar_ssdt, str, strlen(str));
	else
		pr_warn("efivar_ssdt: name too long: %s\n", str);
	return 0;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);

static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
				   unsigned long name_size, void *data)
{
	struct efivar_entry *entry;
	struct list_head *list = data;
	char utf8_name[EFIVAR_SSDT_NAME_MAX];
	int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);

	ucs2_as_utf8(utf8_name, name, limit - 1);
	if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
		return 0;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	memcpy(entry->var.VariableName, name, name_size);
	memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));

	efivar_entry_add(entry, list);

	return 0;
}

static __init int efivar_ssdt_load(void)
{
	LIST_HEAD(entries);
	struct efivar_entry *entry, *aux;
	unsigned long size;
	void *data;
	int ret;

	if (!efivar_ssdt[0])
		return 0;

	ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);

	list_for_each_entry_safe(entry, aux, &entries, list) {
		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
			&entry->var.VendorGuid);

		list_del(&entry->list);

		ret = efivar_entry_size(entry, &size);
		if (ret) {
			pr_err("failed to get var size\n");
			goto free_entry;
		}

		data = kmalloc(size, GFP_KERNEL);
		if (!data) {
			ret = -ENOMEM;
			goto free_entry;
		}

		ret = efivar_entry_get(entry, NULL, &size, data);
		if (ret) {
			pr_err("failed to get var data\n");
			goto free_data;
		}

		ret = acpi_load_table(data, NULL);
		if (ret) {
			pr_err("failed to load table: %d\n", ret);
			goto free_data;
		}

		goto free_entry;

free_data:
		kfree(data);

free_entry:
		kfree(entry);
	}

	return ret;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
#endif

#ifdef CONFIG_DEBUG_FS

#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

static void __init efi_debugfs_init(void)
{
	struct dentry *efi_debugfs;
	efi_memory_desc_t *md;
	char name[32];
	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
	int i = 0;

	efi_debugfs = debugfs_create_dir("efi", NULL);
	if (IS_ERR_OR_NULL(efi_debugfs))
		return;

	for_each_efi_memory_desc(md) {
		switch (md->type) {
		case EFI_BOOT_SERVICES_CODE:
			snprintf(name, sizeof(name), "boot_services_code%d",
				 type_count[md->type]++);
			break;
		case EFI_BOOT_SERVICES_DATA:
			snprintf(name, sizeof(name), "boot_services_data%d",
				 type_count[md->type]++);
			break;
		default:
			continue;
		}

		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
			break;
		}

		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
		debugfs_blob[i].data = memremap(md->phys_addr,
						debugfs_blob[i].size,
						MEMREMAP_WB);
		if (!debugfs_blob[i].data)
			continue;

		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
		i++;
	}
}
#else
static inline void efi_debugfs_init(void) {}
#endif

/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
{
	int error;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		efi.runtime_supported_mask = 0;

	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (efi.runtime_supported_mask) {
		/*
		 * Since we process only one efi_runtime_service() at a time, an
		 * ordered workqueue (which creates only one execution context)
		 * should suffice for all our needs.
		 */
		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
		if (!efi_rts_wq) {
			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
			efi.runtime_supported_mask = 0;
			return 0;
		}
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
		platform_device_register_simple("rtc-efi", 0, NULL, 0);

	/* We register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
	if (!efi_kobj) {
		pr_err("efi: Firmware registration failed.\n");
		destroy_workqueue(efi_rts_wq);
		return -ENOMEM;
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
		error = generic_ops_register();
		if (error)
			goto err_put;
		efivar_ssdt_load();
		platform_device_register_simple("efivars", 0, NULL, 0);
	}

	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
	if (error) {
		pr_err("efi: Sysfs attribute export failed with error %d.\n",
		       error);
		goto err_unregister;
	}

	error = efi_runtime_map_init(efi_kobj);
	if (error)
		goto err_remove_group;

	/* and the standard mountpoint for efivarfs */
	error = sysfs_create_mount_point(efi_kobj, "efivars");
	if (error) {
		pr_err("efivars: Subsystem registration failed.\n");
		goto err_remove_group;
	}

	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
		efi_debugfs_init();

	return 0;

err_remove_group:
	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
		generic_ops_unregister();
err_put:
	kobject_put(efi_kobj);
	destroy_workqueue(efi_rts_wq);
	return error;
}

subsys_initcall(efisubsys_init);

/*
 * Find the efi memory descriptor for a given physical address. Given a
 * physical address, determine if it exists within an EFI Memory Map entry,
 * and if so, populate the supplied memory descriptor with the appropriate
 * data.
 */
int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP)) {
		pr_err_once("EFI_MEMMAP is not enabled.\n");
		return -EINVAL;
	}

	if (!out_md) {
		pr_err_once("out_md is null.\n");
		return -EINVAL;
	}

	for_each_efi_memory_desc(md) {
		u64 size;
		u64 end;

		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;
		if (phys_addr >= md->phys_addr && phys_addr < end) {
			memcpy(out_md, md, sizeof(*out_md));
			return 0;
		}
	}
	return -ENOENT;
}
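
/*
 * A minimal usage sketch (the caller is hypothetical, not taken from this
 * file): a driver that wants to know how a firmware-provided physical
 * address is described by the memory map could do:
 *
 *	efi_memory_desc_t md;
 *
 *	if (!efi_mem_desc_lookup(phys_addr, &md) &&
 *	    (md.attribute & EFI_MEMORY_RUNTIME))
 *		pr_info("address is covered by a runtime region\n");
 */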

/*
 * Calculate the highest address of an efi memory descriptor.
 */
u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
{
	u64 size = md->num_pages << EFI_PAGE_SHIFT;
	u64 end = md->phys_addr + size;
	return end;
}

void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}

/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure the @start, @size is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}
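
/*
 * A rough sketch of the calling convention described above (the size
 * variable is a placeholder, not a value from this file): after locating
 * its table via one of the efi.* config table pointers, a driver keeps the
 * backing memory from being freed with something like:
 *
 *	if (efi.esrt != EFI_INVALID_TABLE_ADDR)
 *		efi_mem_reserve(efi.esrt, my_table_size);
 */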

static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID, &efi.acpi20, "ACPI 2.0" },
	{ACPI_TABLE_GUID, &efi.acpi, "ACPI" },
	{SMBIOS_TABLE_GUID, &efi.smbios, "SMBIOS" },
	{SMBIOS3_TABLE_GUID, &efi.smbios3, "SMBIOS 3.0" },
	{EFI_SYSTEM_RESOURCE_TABLE_GUID, &efi.esrt, "ESRT" },
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR" },
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG" },
	{LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
	{LINUX_EFI_TPM_FINAL_LOG_GUID, &efi.tpm_final_log, "TPMFinalLog" },
	{LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
	{EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
#endif
#ifdef CONFIG_LOAD_UEFI_KEYS
	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID, &efi.mokvar_table, "MOKvar" },
#endif
	{},
};

static __init int match_config_table(const efi_guid_t *guid,
				     unsigned long table,
				     const efi_config_table_type_t *table_types)
{
	int i;

	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
		if (!efi_guidcmp(*guid, table_types[i].guid)) {
			*(table_types[i].ptr) = table;
			if (table_types[i].name[0])
				pr_cont("%s=0x%lx ",
					table_types[i].name, table);
			return 1;
		}
	}

	return 0;
}

int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   int count,
				   const efi_config_table_type_t *arch_tables)
{
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;
	unsigned long table;
	int i;

	pr_info("");
	for (i = 0; i < count; i++) {
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_cont("\n");
				pr_err("Table located above 4GB, disabling EFI.\n");
				return -EINVAL;
			}
		} else {
			guid = &tbl32[i].guid;
			table = tbl32[i].table;
		}

		if (!match_config_table(guid, table, common_tables) && arch_tables)
			match_config_table(guid, table, arch_tables);
	}
	pr_cont("\n");
	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;
		u32 size = 0;

		seed = early_memremap(efi_rng_seed, sizeof(*seed));
		if (seed != NULL) {
			size = READ_ONCE(seed->size);
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
		if (size > 0) {
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
			if (seed != NULL) {
				pr_notice("seeding entropy pool\n");
				add_bootloader_randomness(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
			} else {
				pr_err("Could not map UEFI random seed!\n");
			}
		}
	}

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv,
					 struct_size(rsv, entry, rsv->size));

			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}

			prsv = rsv->next;
			early_memunmap(p, PAGE_SIZE);
		}
	}

	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		tbl = early_memremap(rt_prop, sizeof(*tbl));
		if (tbl) {
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	return 0;
}

int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
				   int min_major_version)
{
	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}

	if ((systab_hdr->revision >> 16) < min_major_version)
		pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
		       systab_hdr->revision >> 16,
		       systab_hdr->revision & 0xffff,
		       min_major_version);

	return 0;
}

#ifndef CONFIG_IA64
static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
						size_t size)
{
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
	if (!ret)
		pr_err("Could not map the firmware vendor!\n");
	return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
	early_memunmap((void *)fw_vendor, size);
}
#else
#define map_fw_vendor(p, s)	__va(p)
#define unmap_fw_vendor(v, s)
#endif

void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
{
	char vendor[100] = "unknown";
	const efi_char16_t *c16;
	size_t i;

	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';

		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	pr_info("EFI v%u.%.02u by %s\n",
		systab_hdr->revision >> 16,
		systab_hdr->revision & 0xffff,
		vendor);

	if (IS_ENABLED(CONFIG_X86_64) &&
	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
	    !strcmp(vendor, "Apple")) {
		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
	}
}

static __initdata char memory_type_name[][13] = {
	"Reserved",
	"Loader Code",
	"Loader Data",
	"Boot Code",
	"Boot Data",
	"Runtime Code",
	"Runtime Data",
	"Conventional",
	"Unusable",
	"ACPI Reclaim",
	"ACPI Mem NVS",
	"MMIO",
	"MMIO Port",
	"PAL Code",
	"Persistent",
};

char * __init efi_md_typeattr_format(char *buf, size_t size,
				     const efi_memory_desc_t *md)
{
	char *pos;
	int type_len;
	u64 attr;

	pos = buf;
	if (md->type >= ARRAY_SIZE(memory_type_name))
		type_len = snprintf(pos, size, "[type=%u", md->type);
	else
		type_len = snprintf(pos, size, "[%-*s",
				    (int)(sizeof(memory_type_name[0]) - 1),
				    memory_type_name[md->type]);
	if (type_len >= size)
		return buf;

	pos += type_len;
	size -= type_len;

	attr = md->attribute;
	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
		snprintf(pos, size, "|attr=0x%016llx]",
			 (unsigned long long)attr);
	else
		snprintf(pos, size,
			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
			 attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
			 attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
			 attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
			 attr & EFI_MEMORY_SP ? "SP" : "",
			 attr & EFI_MEMORY_NV ? "NV" : "",
			 attr & EFI_MEMORY_XP ? "XP" : "",
			 attr & EFI_MEMORY_RP ? "RP" : "",
			 attr & EFI_MEMORY_WP ? "WP" : "",
			 attr & EFI_MEMORY_RO ? "RO" : "",
			 attr & EFI_MEMORY_UCE ? "UCE" : "",
			 attr & EFI_MEMORY_WB ? "WB" : "",
			 attr & EFI_MEMORY_WT ? "WT" : "",
			 attr & EFI_MEMORY_WC ? "WC" : "",
			 attr & EFI_MEMORY_UC ? "UC" : "");
	return buf;
}

/*
 * IA64 has a funky EFI memory map that doesn't work the same way as
 * other architectures.
 */
#ifndef CONFIG_IA64
/*
 * efi_mem_attributes - lookup memmap attributes for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering
 * @phys_addr. Returns the EFI memory attributes if the region
 * was found in the memory map, 0 otherwise.
 */
u64 efi_mem_attributes(unsigned long phys_addr)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return 0;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->attribute;
	}
	return 0;
}
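
/*
 * A small illustrative sketch (the mapping decision below is hypothetical,
 * not a caller in this file): the returned attribute bits are typically
 * used to pick a mapping type, e.g.
 *
 *	u64 attr = efi_mem_attributes(phys_addr);
 *
 *	if (attr & EFI_MEMORY_WB)
 *		addr = memremap(phys_addr, size, MEMREMAP_WB);
 *	else
 *		addr = ioremap(phys_addr, size);
 */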

/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -EINVAL otherwise.
 */
int efi_mem_type(unsigned long phys_addr)
{
	const efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return -ENOTSUPP;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->type;
	}
	return -EINVAL;
}
#endif

int efi_status_to_err(efi_status_t status)
{
	int err;

	switch (status) {
	case EFI_SUCCESS:
		err = 0;
		break;
	case EFI_INVALID_PARAMETER:
		err = -EINVAL;
		break;
	case EFI_OUT_OF_RESOURCES:
		err = -ENOSPC;
		break;
	case EFI_DEVICE_ERROR:
		err = -EIO;
		break;
	case EFI_WRITE_PROTECTED:
		err = -EROFS;
		break;
	case EFI_SECURITY_VIOLATION:
		err = -EACCES;
		break;
	case EFI_NOT_FOUND:
		err = -ENOENT;
		break;
	case EFI_ABORTED:
		err = -EINTR;
		break;
	default:
		err = -EINVAL;
	}

	return err;
}
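
/*
 * A hedged usage sketch (the variable-read call and its arguments are
 * illustrative, not a caller in this file): runtime service wrappers
 * typically convert the EFI status into a normal errno before returning it
 * to the rest of the kernel:
 *
 *	efi_status_t status;
 *
 *	status = efi.get_variable(name, &vendor, NULL, &size, data);
 *	if (status != EFI_SUCCESS)
 *		return efi_status_to_err(status);
 */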

static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

static int __init efi_memreserve_map_root(void)
{
	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	efi_memreserve_root = memremap(mem_reserve,
				       sizeof(*efi_memreserve_root),
				       MEMREMAP_WB);
	if (WARN_ON_ONCE(!efi_memreserve_root))
		return -ENOMEM;
	return 0;
}

static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
	struct resource *res, *parent;
	int ret;

	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return -ENOMEM;

	res->name = "reserved";
	res->flags = IORESOURCE_MEM;
	res->start = addr;
	res->end = addr + size - 1;

	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, res);
	ret = parent ? request_resource(parent, res) : 0;

	/*
	 * Given that efi_mem_reserve_iomem() can be called at any
	 * time, only call memblock_reserve() if the architecture
	 * keeps the infrastructure around.
	 */
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
		memblock_reserve(addr, size);

	return ret;
}

int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
	struct linux_efi_memreserve *rsv;
	unsigned long prsv;
	int rc, index;

	if (efi_memreserve_root == (void *)ULONG_MAX)
		return -ENODEV;

	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();
		if (rc)
			return rc;
	}

	/* first try to find a slot in an existing linked list entry */
	for (prsv = efi_memreserve_root->next; prsv; ) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;

			memunmap(rsv);
			return efi_mem_reserve_iomem(addr, size);
		}
		prsv = rsv->next;
		memunmap(rsv);
	}

	/* no slot found - allocate a new linked list entry */
	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
	if (!rsv)
		return -ENOMEM;

	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
	if (rc) {
		free_page((unsigned long)rsv);
		return rc;
	}

	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;

	spin_lock(&efi_mem_reserve_persistent_lock);
	rsv->next = efi_memreserve_root->next;
	efi_memreserve_root->next = __pa(rsv);
	spin_unlock(&efi_mem_reserve_persistent_lock);

	return efi_mem_reserve_iomem(addr, size);
}
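
/*
 * A hedged example of how this is meant to be used (the address and size
 * below are placeholders, not values from this file): a driver that hands a
 * buffer to the firmware and needs it to survive kexec, via the MEMRESERVE
 * table parsed above, would register it as a persistent reservation:
 *
 *	rc = efi_mem_reserve_persistent(buf_phys, buf_size);
 *	if (rc)
 *		pr_warn("could not reserve buffer across kexec\n");
 */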

static int __init efi_memreserve_root_init(void)
{
	if (efi_memreserve_root)
		return 0;
	if (efi_memreserve_map_root())
		efi_memreserve_root = (void *)ULONG_MAX;
	return 0;
}
early_initcall(efi_memreserve_root_init);

#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	struct linux_efi_random_seed *seed;
	u32 size = 0;

	if (!kexec_in_progress)
		return NOTIFY_DONE;

	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
	if (seed != NULL) {
		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
		memunmap(seed);
	} else {
		pr_err("Could not map UEFI random seed!\n");
	}
	if (size > 0) {
		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
				MEMREMAP_WB);
		if (seed != NULL) {
			seed->size = size;
			get_random_bytes(seed->bits, seed->size);
			memunmap(seed);
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,
};

static int __init register_update_efi_random_seed(void)
{
	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
		return 0;
	return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
#endif