// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 - Google LLC
 * Author: David Brazdil <dbrazdil@google.com>
 */

#include <linux/kvm_host.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_s2mpu.h>

#include <linux/arm-smccc.h>

#include <nvhe/iommu.h>
#include <nvhe/memory.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>
#include <nvhe/trap_handler.h>

#define SMC_CMD_PREPARE_PD_ONOFF	0x82000410
#define SMC_MODE_POWER_UP		1

#define PA_MAX				((phys_addr_t)SZ_1G * NR_GIGABYTES)

#define SYNC_MAX_RETRIES		5
#define SYNC_TIMEOUT			5
#define SYNC_TIMEOUT_MULTIPLIER		3

#define CTX_CFG_ENTRY(ctxid, nr_ctx, vid) \
	(CONTEXT_CFG_VALID_VID_CTX_VID(ctxid, vid) \
	| (((ctxid) < (nr_ctx)) ? CONTEXT_CFG_VALID_VID_CTX_VALID(ctxid) : 0))

#define for_each_child(child, dev) \
	list_for_each_entry((child), &(dev)->children, siblings)

struct s2mpu_drv_data {
	u32 version;
	u32 context_cfg_valid_vid;
};

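/* MPT mirroring the host's stage-2 permissions, applied to every registered S2MPU. */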
static struct mpt host_mpt;

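/* Convert host stage-2 page-table permissions to S2MPU MPT permission bits. */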
static inline enum mpt_prot prot_to_mpt(enum kvm_pgtable_prot prot)
{
	return ((prot & KVM_PGTABLE_PROT_R) ? MPT_PROT_R : 0) |
	       ((prot & KVM_PGTABLE_PROT_W) ? MPT_PROT_W : 0);
}

static bool is_version(struct pkvm_iommu *dev, u32 version)
{
	struct s2mpu_drv_data *data = (struct s2mpu_drv_data *)dev->data;

	return (data->version & VERSION_CHECK_MASK) == version;
}

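/*
 * Compute the CONTEXT_CFG_VALID_VID register value, assigning an available
 * context ID to each VID in 'vid_bmap'. The result is cached in the driver
 * data so the allocation only happens once per device.
 */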
static u32 __context_cfg_valid_vid(struct pkvm_iommu *dev, u32 vid_bmap)
{
	struct s2mpu_drv_data *data = (struct s2mpu_drv_data *)dev->data;
	u8 ctx_vid[NR_CTX_IDS] = { 0 };
	unsigned int vid, ctx = 0;
	unsigned int num_ctx;
	u32 res;

	/* Only initialize once. */
	if (data->context_cfg_valid_vid)
		return data->context_cfg_valid_vid;

	num_ctx = readl_relaxed(dev->va + REG_NS_NUM_CONTEXT) & NUM_CONTEXT_MASK;
	while (vid_bmap) {
		/* Break if we cannot allocate more. */
		if (ctx >= num_ctx)
			break;

		vid = __ffs(vid_bmap);
		vid_bmap &= ~BIT(vid);
		ctx_vid[ctx++] = vid;
	}

	/* The following loop was unrolled so bitmasks are constant. */
	BUILD_BUG_ON(NR_CTX_IDS != 8);
	res = CTX_CFG_ENTRY(0, ctx, ctx_vid[0])
	    | CTX_CFG_ENTRY(1, ctx, ctx_vid[1])
	    | CTX_CFG_ENTRY(2, ctx, ctx_vid[2])
	    | CTX_CFG_ENTRY(3, ctx, ctx_vid[3])
	    | CTX_CFG_ENTRY(4, ctx, ctx_vid[4])
	    | CTX_CFG_ENTRY(5, ctx, ctx_vid[5])
	    | CTX_CFG_ENTRY(6, ctx, ctx_vid[6])
	    | CTX_CFG_ENTRY(7, ctx, ctx_vid[7]);

	data->context_cfg_valid_vid = res;
	return res;
}

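/* v9-specific initialization: program the VID-to-context mapping. */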
static int __initialize_v9(struct pkvm_iommu *dev)
{
	u32 ssmt_valid_vid_bmap, ctx_cfg;

	/* Assume all VIDs may be generated by the connected SSMTs for now. */
	ssmt_valid_vid_bmap = ALL_VIDS_BITMAP;
	ctx_cfg = __context_cfg_valid_vid(dev, ssmt_valid_vid_bmap);
	if (!ctx_cfg)
		return -EINVAL;

	/*
	 * Write CONTEXT_CFG_VALID_VID configuration before touching L1ENTRY*
	 * registers. Writes to those registers are ignored unless there is
	 * a context ID allocated to the corresponding VID (v9 only).
	 */
	writel_relaxed(ctx_cfg, dev->va + REG_NS_CONTEXT_CFG_VALID_VID);
	return 0;
}

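/* Read and cache the S2MPU version, then perform version-specific initialization. */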
static int __initialize(struct pkvm_iommu *dev)
{
	struct s2mpu_drv_data *data = (struct s2mpu_drv_data *)dev->data;

	if (!data->version)
		data->version = readl_relaxed(dev->va + REG_NS_VERSION);

	switch (data->version & VERSION_CHECK_MASK) {
	case S2MPU_VERSION_8:
		return 0;
	case S2MPU_VERSION_9:
		return __initialize_v9(dev);
	default:
		return -EINVAL;
	}
}

static void __set_control_regs(struct pkvm_iommu *dev)
{
	u32 ctrl0 = 0, irq_vids;

	/*
	 * Note: We set the values of CTRL0, CTRL1 and CFG registers here but we
	 * still rely on the correctness of their reset values. S2MPUs *must*
	 * reset to a state where all DMA traffic is blocked until the hypervisor
	 * writes its configuration to the S2MPU. A malicious EL1 could otherwise
	 * attempt to bypass the permission checks in the window between powering
	 * on the S2MPU and this function being called.
	 */

	/* Enable the S2MPU, otherwise all traffic would be allowed through. */
	ctrl0 |= CTRL0_ENABLE;

	/*
	 * Enable interrupts on fault for all VIDs. The IRQ must also be
	 * specified in DT to get unmasked in the GIC.
	 */
	ctrl0 |= CTRL0_INTERRUPT_ENABLE;
	irq_vids = ALL_VIDS_BITMAP;

	/* Return SLVERR/DECERR to device on permission fault. */
	ctrl0 |= is_version(dev, S2MPU_VERSION_9) ? CTRL0_FAULT_RESP_TYPE_DECERR
						  : CTRL0_FAULT_RESP_TYPE_SLVERR;

	writel_relaxed(irq_vids, dev->va + REG_NS_INTERRUPT_ENABLE_PER_VID_SET);
	writel_relaxed(0, dev->va + REG_NS_CFG);
	writel_relaxed(0, dev->va + REG_NS_CTRL1);
	writel_relaxed(ctrl0, dev->va + REG_NS_CTRL0);
}

/*
 * Poll the given SFR until its value has all bits of a given mask set.
 * Returns true if successful, false if not successful after a given number of
 * attempts.
 */
static bool __wait_until(void __iomem *addr, u32 mask, size_t max_attempts)
{
	size_t i;

	for (i = 0; i < max_attempts; i++) {
		if ((readl_relaxed(addr) & mask) == mask)
			return true;
	}
	return false;
}

/* Poll the given SFR as long as its value has all bits of a given mask set. */
static void __wait_while(void __iomem *addr, u32 mask)
{
	while ((readl_relaxed(addr) & mask) == mask)
		continue;
}

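/* Kick off a SYNC operation on a SysMMU_SYNC device. */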
static void __sync_cmd_start(struct pkvm_iommu *sync)
{
	writel_relaxed(SYNC_CMD_SYNC, sync->va + REG_NS_SYNC_CMD);
}

static void __invalidation_barrier_slow(struct pkvm_iommu *sync)
{
	size_t i, timeout;

	/*
	 * Wait for transactions to drain if SysMMU_SYNCs were registered.
	 * Assumes that they are in the same power domain as the S2MPU.
	 *
	 * The algorithm will try initiating the SYNC if the SYNC_COMP_COMPLETE
	 * bit has not been set after a given number of attempts, increasing the
	 * timeout exponentially each time. If this cycle fails a given number
	 * of times, the algorithm will give up completely to avoid deadlock.
	 */
	timeout = SYNC_TIMEOUT;
	for (i = 0; i < SYNC_MAX_RETRIES; i++) {
		__sync_cmd_start(sync);
		if (__wait_until(sync->va + REG_NS_SYNC_COMP, SYNC_COMP_COMPLETE, timeout))
			break;
		timeout *= SYNC_TIMEOUT_MULTIPLIER;
	}
}

/* Initiate invalidation barrier. */
static void __invalidation_barrier_init(struct pkvm_iommu *dev)
{
	struct pkvm_iommu *sync;

	for_each_child(sync, dev)
		__sync_cmd_start(sync);
}

/* Wait for invalidation to complete. */
static void __invalidation_barrier_complete(struct pkvm_iommu *dev)
{
	struct pkvm_iommu *sync;

	/*
	 * Check if the SYNC_COMP_COMPLETE bit has been set for individual
	 * devices. If not, fall back to non-parallel invalidation.
	 */
	for_each_child(sync, dev) {
		if (!(readl_relaxed(sync->va + REG_NS_SYNC_COMP) & SYNC_COMP_COMPLETE))
			__invalidation_barrier_slow(sync);
	}

	/* Must not access SFRs while S2MPU is busy invalidating (v9 only). */
	if (is_version(dev, S2MPU_VERSION_9)) {
		__wait_while(dev->va + REG_NS_STATUS,
			     STATUS_BUSY | STATUS_ON_INVALIDATING);
	}
}

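/* Issue an all-invalidation and wait for it to complete. */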
static void __all_invalidation(struct pkvm_iommu *dev)
{
	writel_relaxed(INVALIDATION_INVALIDATE, dev->va + REG_NS_ALL_INVALIDATION);
	__invalidation_barrier_init(dev);
	__invalidation_barrier_complete(dev);
}

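/*
 * Initiate invalidation of the given physical address range. The caller must
 * wait for completion with __invalidation_barrier_complete().
 */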
static void __range_invalidation_init(struct pkvm_iommu *dev, phys_addr_t first_byte,
				      phys_addr_t last_byte)
{
	u32 start_ppn = first_byte >> RANGE_INVALIDATION_PPN_SHIFT;
	u32 end_ppn = last_byte >> RANGE_INVALIDATION_PPN_SHIFT;

	writel_relaxed(start_ppn, dev->va + REG_NS_RANGE_INVALIDATION_START_PPN);
	writel_relaxed(end_ppn, dev->va + REG_NS_RANGE_INVALIDATION_END_PPN);
	writel_relaxed(INVALIDATION_INVALIDATE, dev->va + REG_NS_RANGE_INVALIDATION);
	__invalidation_barrier_init(dev);
}

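/*
 * Program a GB region's L1ENTRY_ATTR register with 1G granularity and the
 * given protection bits.
 */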
static void __set_l1entry_attr_with_prot(struct pkvm_iommu *dev, unsigned int gb,
					 unsigned int vid, enum mpt_prot prot)
{
	writel_relaxed(L1ENTRY_ATTR_1G(prot),
		       dev->va + REG_NS_L1ENTRY_ATTR(vid, gb));
}

static void __set_l1entry_attr_with_fmpt(struct pkvm_iommu *dev, unsigned int gb,
					 unsigned int vid, struct fmpt *fmpt)
{
	if (fmpt->gran_1g) {
		__set_l1entry_attr_with_prot(dev, gb, vid, fmpt->prot);
	} else {
		/* Order against writes to the SMPT. */
		writel(L1ENTRY_ATTR_L2(SMPT_GRAN_ATTR),
		       dev->va + REG_NS_L1ENTRY_ATTR(vid, gb));
	}
}

static void __set_l1entry_l2table_addr(struct pkvm_iommu *dev, unsigned int gb,
				       unsigned int vid, phys_addr_t addr)
{
	/* Order against writes to the SMPT. */
	writel(L1ENTRY_L2TABLE_ADDR(addr),
	       dev->va + REG_NS_L1ENTRY_L2TABLE_ADDR(vid, gb));
}

/*
 * Initialize S2MPU device and set all GB regions to 1G granularity with
 * given protection bits.
 */
static int initialize_with_prot(struct pkvm_iommu *dev, enum mpt_prot prot)
{
	unsigned int gb, vid;
	int ret;

	ret = __initialize(dev);
	if (ret)
		return ret;

	for_each_gb_and_vid(gb, vid)
		__set_l1entry_attr_with_prot(dev, gb, vid, prot);
	__all_invalidation(dev);

	/* Set control registers, enable the S2MPU. */
	__set_control_regs(dev);
	return 0;
}

/*
 * Initialize S2MPU device, set L2 table addresses and configure L1TABLE_ATTR
 * registers according to the given MPT struct.
 */
static int initialize_with_mpt(struct pkvm_iommu *dev, struct mpt *mpt)
{
	unsigned int gb, vid;
	struct fmpt *fmpt;
	int ret;

	ret = __initialize(dev);
	if (ret)
		return ret;

	for_each_gb_and_vid(gb, vid) {
		fmpt = &mpt->fmpt[gb];
		__set_l1entry_l2table_addr(dev, gb, vid, __hyp_pa(fmpt->smpt));
		__set_l1entry_attr_with_fmpt(dev, gb, vid, fmpt);
	}
	__all_invalidation(dev);

	/* Set control registers, enable the S2MPU. */
	__set_control_regs(dev);
	return 0;
}

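/*
 * Clamp the range to the S2MPU-addressable address space and align it to
 * SMPT granularity. Returns false if the resulting range is empty.
 */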
static bool to_valid_range(phys_addr_t *start, phys_addr_t *end)
{
	phys_addr_t new_start = *start;
	phys_addr_t new_end = *end;

	if (new_end > PA_MAX)
		new_end = PA_MAX;

	new_start = ALIGN_DOWN(new_start, SMPT_GRAN);
	new_end = ALIGN(new_end, SMPT_GRAN);

	if (new_start >= new_end)
		return false;

	*start = new_start;
	*end = new_end;
	return true;
}

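/*
 * Update the MPT entries covering the given byte range and clean modified
 * SMPTs to the point of coherency so the S2MPU observes the new values.
 */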
static void __mpt_idmap_prepare(struct mpt *mpt, phys_addr_t first_byte,
				phys_addr_t last_byte, enum mpt_prot prot)
{
	unsigned int first_gb = first_byte / SZ_1G;
	unsigned int last_gb = last_byte / SZ_1G;
	size_t start_gb_byte, end_gb_byte;
	unsigned int gb;
	struct fmpt *fmpt;

	for_each_gb_in_range(gb, first_gb, last_gb) {
		fmpt = &mpt->fmpt[gb];
		start_gb_byte = (gb == first_gb) ? first_byte % SZ_1G : 0;
		end_gb_byte = (gb == last_gb) ? (last_byte % SZ_1G) + 1 : SZ_1G;

		__set_fmpt_range(fmpt, start_gb_byte, end_gb_byte, prot);

		if (fmpt->flags & MPT_UPDATE_L2)
			kvm_flush_dcache_to_poc(fmpt->smpt, SMPT_SIZE);
	}
}

static void __mpt_idmap_apply(struct pkvm_iommu *dev, struct mpt *mpt,
			      phys_addr_t first_byte, phys_addr_t last_byte)
{
	unsigned int first_gb = first_byte / SZ_1G;
	unsigned int last_gb = last_byte / SZ_1G;
	unsigned int gb, vid;
	struct fmpt *fmpt;

	for_each_gb_in_range(gb, first_gb, last_gb) {
		fmpt = &mpt->fmpt[gb];

		if (fmpt->flags & MPT_UPDATE_L1) {
			for_each_vid(vid)
				__set_l1entry_attr_with_fmpt(dev, gb, vid, fmpt);
		}
	}
	/* Initiate invalidation, completed in __mpt_idmap_complete. */
	__range_invalidation_init(dev, first_byte, last_byte);
}

static void __mpt_idmap_complete(struct pkvm_iommu *dev, struct mpt *mpt)
{
	__invalidation_barrier_complete(dev);
}

static void s2mpu_host_stage2_idmap_prepare(phys_addr_t start, phys_addr_t end,
					    enum kvm_pgtable_prot prot)
{
	if (!to_valid_range(&start, &end))
		return;

	__mpt_idmap_prepare(&host_mpt, start, end - 1, prot_to_mpt(prot));
}

static void s2mpu_host_stage2_idmap_apply(struct pkvm_iommu *dev,
					  phys_addr_t start, phys_addr_t end)
{
	if (!to_valid_range(&start, &end))
		return;

	__mpt_idmap_apply(dev, &host_mpt, start, end - 1);
}

static void s2mpu_host_stage2_idmap_complete(struct pkvm_iommu *dev)
{
	__mpt_idmap_complete(dev, &host_mpt);
}

static int s2mpu_resume(struct pkvm_iommu *dev)
{
	/*
	 * Initialize the S2MPU with the host stage-2 MPT. It is paramount
	 * that the S2MPU reset state is enabled and blocking all traffic,
	 * otherwise the host would not be forced to call the resume HVC
	 * before issuing DMA traffic.
	 */
	return initialize_with_mpt(dev, &host_mpt);
}

static int s2mpu_suspend(struct pkvm_iommu *dev)
{
	/*
	 * Stop updating the S2MPU when the host informs us about the intention
	 * to suspend it. Writes to powered-down MMIO registers would trigger
	 * SErrors in EL1 otherwise. However, hyp must put S2MPU back to
	 * blocking state first, in case the host does not actually power it
	 * down and continues issuing DMA traffic.
	 */
	return initialize_with_prot(dev, MPT_PROT_NONE);
}

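/*
 * Return a bitmask of the register bits the host is allowed to access at the
 * given MMIO offset, or zero if the access must be rejected.
 */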
static u32 host_mmio_reg_access_mask(size_t off, bool is_write)
{
	const u32 no_access = 0;
	const u32 read_write = (u32)(-1);
	const u32 read_only = is_write ? no_access : read_write;
	const u32 write_only = is_write ? read_write : no_access;
	u32 masked_off;

	switch (off) {
	/* Allow reading control registers for debugging. */
	case REG_NS_CTRL0:
		return read_only & CTRL0_MASK;
	case REG_NS_CTRL1:
		return read_only & CTRL1_MASK;
	case REG_NS_CFG:
		return read_only & CFG_MASK;
	/* Allow EL1 IRQ handler to clear interrupts. */
	case REG_NS_INTERRUPT_CLEAR:
		return write_only & ALL_VIDS_BITMAP;
	/* Allow reading number of sets used by MPTC. */
	case REG_NS_INFO:
		return read_only & INFO_NUM_SET_MASK;
	/* Allow EL1 IRQ handler to read bitmap of pending interrupts. */
	case REG_NS_FAULT_STATUS:
		return read_only & ALL_VIDS_BITMAP;
	/*
	 * Allow reading MPTC entries for debugging. That involves:
	 *   - writing (set,way) to READ_MPTC
	 *   - reading READ_MPTC_*
	 */
	case REG_NS_READ_MPTC:
		return write_only & READ_MPTC_MASK;
	case REG_NS_READ_MPTC_TAG_PPN:
		return read_only & READ_MPTC_TAG_PPN_MASK;
	case REG_NS_READ_MPTC_TAG_OTHERS:
		return read_only & READ_MPTC_TAG_OTHERS_MASK;
	case REG_NS_READ_MPTC_DATA:
		return read_only;
	}

	/* Allow reading L1ENTRY registers for debugging. */
	if (off >= REG_NS_L1ENTRY_L2TABLE_ADDR(0, 0) &&
	    off < REG_NS_L1ENTRY_ATTR(NR_VIDS, 0))
		return read_only;

	/* Allow EL1 IRQ handler to read fault information. */
	masked_off = off & ~REG_NS_FAULT_VID_MASK;
	if ((masked_off == REG_NS_FAULT_PA_LOW(0)) ||
	    (masked_off == REG_NS_FAULT_PA_HIGH(0)) ||
	    (masked_off == REG_NS_FAULT_INFO(0)))
		return read_only;

	return no_access;
}

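/*
 * Emulate a host data abort on the S2MPU MMIO region. Only naturally aligned
 * 32-bit accesses to permitted register bits are forwarded to the device.
 */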
static bool s2mpu_host_dabt_handler(struct pkvm_iommu *dev,
				    struct kvm_cpu_context *host_ctxt,
				    u32 esr, size_t off)
{
	bool is_write = esr & ESR_ELx_WNR;
	unsigned int len = BIT((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
	int rd = (esr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
	u32 mask;

	/* Only handle MMIO access with u32 size and alignment. */
	if ((len != sizeof(u32)) || (off & (sizeof(u32) - 1)))
		return false;

	mask = host_mmio_reg_access_mask(off, is_write);
	if (!mask)
		return false;

	if (is_write)
		writel_relaxed(cpu_reg(host_ctxt, rd) & mask, dev->va + off);
	else
		cpu_reg(host_ctxt, rd) = readl_relaxed(dev->va + off) & mask;
	return true;
}

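/*
 * Driver initialization: copy the host-provided MPT descriptor and take
 * ownership of its SMPT buffers, returning them to the host on failure.
 */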
static int s2mpu_init(void *data, size_t size)
{
	struct mpt in_mpt;
	u32 *smpt;
	phys_addr_t pa;
	unsigned int gb;
	int ret = 0;

	if (size != sizeof(in_mpt))
		return -EINVAL;

	/* The host can concurrently modify 'data'. Copy it to avoid TOCTOU. */
	memcpy(&in_mpt, data, sizeof(in_mpt));

	/* Take ownership of all SMPT buffers. This will also map them in. */
	for_each_gb(gb) {
		smpt = kern_hyp_va(in_mpt.fmpt[gb].smpt);
		pa = __hyp_pa(smpt);

		if (!IS_ALIGNED(pa, SMPT_SIZE)) {
			ret = -EINVAL;
			break;
		}

		ret = __pkvm_host_donate_hyp(pa >> PAGE_SHIFT, SMPT_NUM_PAGES);
		if (ret)
			break;

		host_mpt.fmpt[gb] = (struct fmpt){
			.smpt = smpt,
			.gran_1g = true,
			.prot = MPT_PROT_RW,
		};
	}

	/* Try to return memory back if there was an error. */
	if (ret) {
		for_each_gb(gb) {
			smpt = host_mpt.fmpt[gb].smpt;
			if (!smpt)
				break;

			WARN_ON(__pkvm_hyp_donate_host(__hyp_pa(smpt) >> PAGE_SHIFT,
						       SMPT_NUM_PAGES));
		}
		memset(&host_mpt, 0, sizeof(host_mpt));
	}

	return ret;
}

static int s2mpu_validate(struct pkvm_iommu *dev)
{
	if (dev->size != S2MPU_MMIO_SIZE)
		return -EINVAL;

	return 0;
}

static int s2mpu_validate_child(struct pkvm_iommu *dev, struct pkvm_iommu *child)
{
	if (child->ops != &pkvm_sysmmu_sync_ops)
		return -EINVAL;

	return 0;
}

static int sysmmu_sync_validate(struct pkvm_iommu *dev)
{
	if (dev->size != SYSMMU_SYNC_S2_MMIO_SIZE)
		return -EINVAL;

	if (!dev->parent || dev->parent->ops != &pkvm_s2mpu_ops)
		return -EINVAL;

	return 0;
}

const struct pkvm_iommu_ops pkvm_s2mpu_ops = (struct pkvm_iommu_ops){
	.init = s2mpu_init,
	.validate = s2mpu_validate,
	.validate_child = s2mpu_validate_child,
	.resume = s2mpu_resume,
	.suspend = s2mpu_suspend,
	.host_stage2_idmap_prepare = s2mpu_host_stage2_idmap_prepare,
	.host_stage2_idmap_apply = s2mpu_host_stage2_idmap_apply,
	.host_stage2_idmap_complete = s2mpu_host_stage2_idmap_complete,
	.host_dabt_handler = s2mpu_host_dabt_handler,
	.data_size = sizeof(struct s2mpu_drv_data),
};

const struct pkvm_iommu_ops pkvm_sysmmu_sync_ops = (struct pkvm_iommu_ops){
	.validate = sysmmu_sync_validate,
};