/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#ifndef __ASM_ARM_EFI_H
#define __ASM_ARM_EFI_H

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/highmem.h>
#include <asm/mach/map.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>

#ifdef CONFIG_EFI
void efi_init(void);

int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
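
/*
 * Note: both helpers above are implemented in arch/arm/kernel/efi.c and are
 * expected to be called by the shared ARM EFI runtime setup code, which walks
 * the UEFI memory map and installs each EFI_MEMORY_RUNTIME descriptor into
 * the dedicated efi_mm address space.
 */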

#define arch_efi_call_virt_setup()	efi_virtmap_load()
#define arch_efi_call_virt_teardown()	efi_virtmap_unload()

#define arch_efi_call_virt(p, f, args...)				\
({									\
	efi_##f##_t *__f;						\
	__f = p->f;							\
	__f(args);							\
})
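
/*
 * The arch_efi_call_virt* hooks above are consumed by the generic EFI
 * runtime wrappers (drivers/firmware/efi/runtime-wrappers.c).  Roughly, a
 * runtime service call such as get_time() ends up bracketed like this
 * (illustrative sketch, not the literal macro expansion):
 *
 *	efi_virtmap_load();		// arch_efi_call_virt_setup()
 *	status = __f(tm);		// arch_efi_call_virt(p, get_time, tm)
 *	efi_virtmap_unload();		// arch_efi_call_virt_teardown()
 */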

#define ARCH_EFI_IRQ_FLAGS_MASK \
	(PSR_J_BIT | PSR_E_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | \
	 PSR_T_BIT | MODE_MASK)
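
/*
 * The generic runtime wrappers use this mask to compare the CPSR saved
 * before a runtime service call with the value seen afterwards, so firmware
 * that returns with the interrupt masks, endianness, Thumb/Jazelle state or
 * processor mode changed can be detected and reported.
 */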

static inline void efi_set_pgd(struct mm_struct *mm)
{
	check_and_switch_context(mm, NULL);
}

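/*
 * efi_virtmap_load()/efi_virtmap_unload() live in the shared ARM EFI runtime
 * support code under drivers/firmware/efi/; broadly, they use efi_set_pgd()
 * to switch to the dedicated EFI page tables (efi_mm) for the duration of a
 * runtime service call and back to the previously active mm afterwards.
 */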
void efi_virtmap_load(void);
void efi_virtmap_unload(void);

#else
#define efi_init()
#endif /* CONFIG_EFI */

/* arch specific definitions used by the stub code */

struct screen_info *alloc_screen_info(void);
void free_screen_info(struct screen_info *si);

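/*
 * Stub so that callers of the DMI-based framebuffer quirk hook still compile
 * on ARM; there is no DMI/SMBIOS quirk handling for screen_info here, hence
 * the empty body.
 */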
static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
{
}

/*
 * A reasonable upper bound for the uncompressed kernel size is 32 MBytes,
 * so we will reserve that amount of memory. We have no easy way to tell
 * how much code + data the uncompressed kernel will actually use.
 * If this is insufficient, the decompressor will relocate itself out of
 * the way before performing the decompression.
 */
#define MAX_UNCOMP_KERNEL_SIZE	SZ_32M

/*
 * phys-to-virt patching requires that the physical to virtual offset fits
 * into the immediate field of an add/sub instruction, which comes down to the
 * 24 least significant bits being zero, and so the offset should be a multiple
 * of 16 MB. Since PAGE_OFFSET itself is a multiple of 16 MB, the physical
 * base should be aligned to 16 MB as well.
 */
#define EFI_PHYS_ALIGN		SZ_16M
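
/*
 * Illustrative example (made-up addresses): with PAGE_OFFSET at 0xc0000000
 * and the kernel placed at physical 0x43000000 (16 MB aligned), the
 * phys-to-virt offset is 0xc0000000 - 0x43000000 = 0x7d000000.  Its 24
 * least significant bits are zero, so it fits the rotated 8-bit immediate
 * field of a single add/sub instruction, as required above.
 */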

/* on ARM, the FDT should be located in a lowmem region */
static inline unsigned long efi_get_max_fdt_addr(unsigned long image_addr)
{
	return round_down(image_addr, EFI_PHYS_ALIGN) + SZ_512M;
}
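
/*
 * Both this limit and the initrd limit below keep the loaded blob within
 * 512 MB of the (16 MB aligned) kernel base, which should be a conservative
 * way of keeping it inside the region the kernel maps as lowmem, wherever
 * the kernel itself was placed in RAM.
 */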

/* on ARM, the initrd should be loaded in a lowmem region */
static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
{
	return round_down(image_addr, EFI_PHYS_ALIGN) + SZ_512M;
}

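/*
 * Snapshot of the CPSR and SCTLR values taken by the EFI stub around the
 * ExitBootServices() call ("ebs"), so the early boot code can report them
 * when diagnosing firmware that hands over with the MMU, caches or
 * interrupt state configured unexpectedly.
 */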
struct efi_arm_entry_state {
	u32	cpsr_before_ebs;
	u32	sctlr_before_ebs;
	u32	cpsr_after_ebs;
	u32	sctlr_after_ebs;
};

#endif /* __ASM_ARM_EFI_H */