/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MODULE_H
#define __ASM_MODULE_H

#include <asm-generic/module.h>

#ifdef CONFIG_KVM
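/* Start/end bounds of one section of a pKVM (EL2) module image. */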
struct pkvm_module_section {
	void *start;
	void *end;
};

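/*
 * Relocations against the EL2 module image are recorded as 32-bit
 * entries; see kvm_apply_hyp_module_relocations() below.
 */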
typedef s32 kvm_nvhe_reloc_t;
struct pkvm_module_ops;

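/*
 * Per-module state for a module loaded into the pKVM (EL2) hypervisor:
 * section bounds of the loaded image, the tracing events the module
 * declares, the relocation table to apply when the image is mapped at
 * EL2, and the module's init entry point.
 */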
struct pkvm_el2_module {
	struct pkvm_module_section text;
	struct pkvm_module_section bss;
	struct pkvm_module_section rodata;
	struct pkvm_module_section data;
	struct pkvm_module_section event_ids;
	struct pkvm_module_section sections;
	struct hyp_event *hyp_events;
	unsigned int nr_hyp_events;
	kvm_nvhe_reloc_t *relocs;
	struct list_head node;
	unsigned long token;
	unsigned int nr_relocs;
	int (*init)(const struct pkvm_module_ops *ops);
};

void kvm_apply_hyp_module_relocations(void *mod_start, void *hyp_va,
				      kvm_nvhe_reloc_t *begin,
				      kvm_nvhe_reloc_t *end);
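/*
 * Illustrative sketch, not part of this header: assuming relocs and
 * nr_relocs delimit the relocation table and the module image starts at
 * text.start, a caller holding a struct pkvm_el2_module 'mod' and the
 * hypervisor VA 'hyp_va' it will be mapped at could apply the
 * relocations roughly as:
 *
 *	kvm_apply_hyp_module_relocations(mod.text.start, hyp_va,
 *					 mod.relocs,
 *					 mod.relocs + mod.nr_relocs);
 */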

#define ARM64_MODULE_KVM_ARCHDATA					\
	/* For pKVM hypervisor modules */				\
	struct pkvm_el2_module	hyp;
#else
#define ARM64_MODULE_KVM_ARCHDATA
#endif

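/*
 * Bookkeeping for one PLT section: its index in the module's section
 * header table, and how many veneer slots are in use versus reserved.
 */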
struct mod_plt_sec {
	int			plt_shndx;
	int			plt_num_entries;
	int			plt_max_entries;
};

struct mod_arch_specific {
	struct mod_plt_sec	core;
	struct mod_plt_sec	init;

	/* for CONFIG_DYNAMIC_FTRACE */
	struct plt_entry	*ftrace_trampolines;

	ARM64_MODULE_KVM_ARCHDATA
};

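/*
 * Emit (or reuse) an entry in the module's PLT section: a branch veneer
 * for a branch target that is out of direct-branch range, or an ADRP
 * veneer for the erratum 843419 workaround. Both return the address of
 * the entry.
 */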
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
			  void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym);

u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
				void *loc, u64 val);

struct plt_entry {
	/*
	 * A program that conforms to the AArch64 Procedure Call Standard
	 * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
	 * IP1 (x17) may be inserted at any branch instruction that is
	 * exposed to a relocation that supports long branches. Since that
	 * is exactly what we are dealing with here, we are free to use x16
	 * as a scratch register in the PLT veneers.
	 */
	__le32	adrp;	/* adrp	x16, ....			*/
	__le32	add;	/* add	x16, x16, #0x....		*/
	__le32	br;	/* br	x16				*/
};

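/*
 * Cortex-A53 erratum 843419 concerns ADRP instructions placed in the
 * last two instruction slots of a 4 KiB page (offsets 0xff8 and 0xffc),
 * so such locations must be avoided when affected CPUs are present.
 */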
static inline bool is_forbidden_offset_for_adrp(void *place)
{
	return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
	       cpus_have_const_cap(ARM64_WORKAROUND_843419) &&
	       ((u64)place & 0xfff) >= 0xff8;
}

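/* Compose the adrp/add/br veneer, to be placed at @pc, that branches to @dst. */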
struct plt_entry get_plt_entry(u64 dst, void *pc);

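/* Return the section header named @name, or NULL if no such section exists. */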
static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
					   const Elf_Shdr *sechdrs,
					   const char *name)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(name, secstrs + s->sh_name) == 0)
			return s;
	}

	return NULL;
}
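
/*
 * Example (illustrative only): module loading code can use this to
 * locate a section by name, e.g.:
 *
 *	const Elf_Shdr *s = find_section(hdr, sechdrs, ".plt");
 *	if (!s)
 *		return -ENOEXEC;
 */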

#endif /* __ASM_MODULE_H */