// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/kvm_host.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <asm/alternative.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/kvm_mmu.h>

/*
 * The LSB of the random hyp VA tag or 0 if no randomization is used.
 */
static u8 tag_lsb;
/*
 * The random hyp VA tag value with the region bit if hyp randomization is used
 */
static u64 tag_val;
static u64 va_mask;

static void compute_layout(void)
{
	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
	u64 hyp_va_msb;
	int kva_msb;

	/* Where is my RAM region? */
	hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
	hyp_va_msb ^= BIT(vabits_actual - 1);
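	/*
	 * The region bit is the complement of the idmap's top VA bit:
	 * keeping all hyp VAs in the half of the EL2 address space that
	 * does not contain the idmap ensures the two never overlap.
	 */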

	kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
			(u64)(high_memory - 1));
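	/*
	 * kva_msb is the highest bit that can differ between two kernel
	 * linear map addresses: everything above it is common to the
	 * whole linear range, and is therefore free to host a random tag.
	 */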

	if (kva_msb == (vabits_actual - 1)) {
		/*
		 * No space in the address, let's compute the mask so
		 * that it covers (vabits_actual - 1) bits, and the region
		 * bit. The tag stays set to zero.
		 */
		va_mask  = BIT(vabits_actual - 1) - 1;
		va_mask |= hyp_va_msb;
	} else {
		/*
		 * We do have some free bits to insert a random tag.
		 * Hyp VAs are now created from kernel linear map VAs
		 * using the following formula (with V == vabits_actual):
		 *
		 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
		 *  ---------------------------------------------------------
		 * | 0000000 | hyp_va_msb |    random tag  |  kern linear VA |
		 */
		tag_lsb = kva_msb;
		va_mask = GENMASK_ULL(tag_lsb - 1, 0);
		tag_val = get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
		tag_val |= hyp_va_msb;
		tag_val >>= tag_lsb;
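
		/*
		 * Worked example (hypothetical values): with
		 * vabits_actual == 48 and kva_msb == 40, va_mask covers
		 * bits [39:0], and tag_val holds random bits [46:40] plus
		 * the region bit at [47], shifted down by tag_lsb so it
		 * can later be re-inserted as two 12-bit ADD immediates.
		 */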
	}
}

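/*
 * compute_instruction() emits, one instruction at a time, the sequence
 * that kvm_update_va_mask() patches into the kern_hyp_va() alternative
 * (with "reg" standing for the register encoded in the original insn):
 *
 *	and reg, reg, #va_mask		// keep the linear-map bits
 *	ror reg, reg, #tag_lsb		// rotate the tag area down to bit 0
 *	add reg, reg, #(tag_val & GENMASK(11, 0))
 *	add reg, reg, #(tag_val & GENMASK(23, 12))
 *	ror reg, reg, #(64 - tag_lsb)	// rotate back, tag merged in
 */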
static u32 compute_instruction(int n, u32 rd, u32 rn)
{
	u32 insn = AARCH64_BREAK_FAULT;

	switch (n) {
	case 0:
		insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_AND,
							  AARCH64_INSN_VARIANT_64BIT,
							  rn, rd, va_mask);
		break;

	case 1:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd,
					     tag_lsb);
		break;

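	/*
	 * An AArch64 ADD (immediate) encodes a 12-bit immediate,
	 * optionally shifted left by 12, so the shifted tag is
	 * inserted in two steps: bits [11:0] here (case 2) and
	 * bits [23:12] in case 3 below.
	 */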
	case 2:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(11, 0),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 3:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(23, 12),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 4:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd, 64 - tag_lsb);
		break;
	}

	return insn;
}

void __init kvm_update_va_mask(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	int i;

	BUG_ON(nr_inst != 5);

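	/*
	 * compute_layout() runs lazily on the first patching callback,
	 * with a non-zero va_mask serving as the "already computed"
	 * marker (kvm_patch_vector_branch() below uses the same guard).
	 */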
	if (!has_vhe() && !va_mask)
		compute_layout();

	for (i = 0; i < nr_inst; i++) {
		u32 rd, rn, insn, oinsn;

		/*
		 * VHE doesn't need any address translation, let's NOP
		 * everything.
		 *
		 * Alternatively, if we don't have any spare bits in
		 * the address, NOP everything after masking that
		 * kernel VA.
		 */
		if (has_vhe() || (!tag_lsb && i > 0)) {
			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
			continue;
		}

		oinsn = le32_to_cpu(origptr[i]);
		rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
		rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);

		insn = compute_instruction(i, rd, rn);
		BUG_ON(insn == AARCH64_BREAK_FAULT);

		updptr[i] = cpu_to_le32(insn);
	}
}

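/*
 * Vector remapping state for the ARM64_HARDEN_EL2_VECTORS workaround:
 * the base of the remapped vectors and the slot allocated in it. Both
 * are populated by the vector mapping code at init time.
 */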
void *__kvm_bp_vect_base;
int __kvm_harden_el2_vector_slot;

void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u64 addr;
	u32 insn;

	BUG_ON(nr_inst != 5);

	if (has_vhe() || !cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		WARN_ON_ONCE(cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS));
		return;
	}

	if (!va_mask)
		compute_layout();

	/*
	 * Compute HYP VA by using the same computation as kern_hyp_va()
	 */
	addr = (uintptr_t)kvm_ksym_ref(__kvm_hyp_vector);
	addr &= va_mask;
	addr |= tag_val << tag_lsb;

	/* Use PC[10:7] to branch to the same vector in KVM */
	addr |= ((u64)origptr & GENMASK_ULL(10, 7));
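	/*
	 * A vector table holds sixteen 128-byte entries, so bits [10:7]
	 * of the patched instruction's address identify the entry it
	 * lives in; copying them over makes the branch land in the
	 * matching entry of the KVM vectors.
	 */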

	/*
	 * Branch over the preamble in order to avoid the initial store on
	 * the stack (which we already perform in the hardening vectors).
	 */
	addr += KVM_VECTOR_PREAMBLE;

	/* stp x0, x1, [sp, #-16]! */
	insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_0,
						AARCH64_INSN_REG_1,
						AARCH64_INSN_REG_SP,
						-16,
						AARCH64_INSN_VARIANT_64BIT,
						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
	*updptr++ = cpu_to_le32(insn);

	/* movz x0, #(addr & 0xffff) */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)addr,
					 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 16),
					 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 32),
					 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* br x0 */
	insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
					   AARCH64_INSN_BRANCH_NOLINK);
	*updptr++ = cpu_to_le32(insn);
}