// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2023 - Google LLC
 * Author: Will Deacon <willdeacon@google.com>
 */
#include <asm/kvm_host.h>
#include <asm/pgtable.h>

#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/memblock.h>

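/*
 * Static key enabled via the "kvm-arm.force_nc" command-line parameter;
 * callers can check it to request the immediate 'force_nc' behaviour
 * described below.
 */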
DEFINE_STATIC_KEY_FALSE(pkvm_force_nc);
static int __init early_pkvm_force_nc_cfg(char *arg)
{
	static_branch_enable(&pkvm_force_nc);
	return 0;
}
early_param("kvm-arm.force_nc", early_pkvm_force_nc_cfg);

/*
 * Update the stage-2 memory attributes (cacheability) for a page, usually
 * in response to mapping or unmapping a normal non-cacheable region at
 * stage-1.
 *
 * If 'force_nc' is set, the stage-2 entry is immediately made non-cacheable
 * (and cleaned+invalidated to the PoC); otherwise the entry is unmapped and
 * the cacheability is determined by the stage-1 attribute of the next access
 * (with no cache maintenance being performed).
 */
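/*
 * Regions that are made non-cacheable before pKVM itself has been
 * initialised are tracked here and registered with the hypervisor later,
 * by pkvm_register_early_nc_mappings().
 */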
struct pkvm_host_nc_region {
	phys_addr_t start;
	phys_addr_t end;
};

#define PKVM_HOST_MAX_EARLY_NC_REGIONS 8
static struct pkvm_host_nc_region
	pkvm_host_early_nc_regions[PKVM_HOST_MAX_EARLY_NC_REGIONS];

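/*
 * Remember a page that was forced to be non-cacheable before pKVM was
 * initialised. Physically contiguous pages are coalesced into a single
 * region so that they can be replayed to the hypervisor later on.
 */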
static void pkvm_host_track_early_nc_mapping(phys_addr_t addr)
{
	static int idx /*= 0*/;
	struct pkvm_host_nc_region *reg = &pkvm_host_early_nc_regions[idx];

	if (reg->start == reg->end) {
		reg->start = addr;
	} else if (reg->end != addr) {
		if (WARN_ON(idx == PKVM_HOST_MAX_EARLY_NC_REGIONS - 1))
			return;

		reg = &pkvm_host_early_nc_regions[++idx];
		reg->start = addr;
	}

	reg->end = addr + PAGE_SIZE;
}

void pkvm_host_set_stage2_memattr(phys_addr_t addr, bool force_nc)
{
	int err;

	if (kvm_get_mode() != KVM_MODE_PROTECTED)
		return;

	/*
	 * Non-memory regions or carveouts marked as "no-map" are handled
	 * entirely by their corresponding driver, which should avoid the
	 * creation of a cacheable alias in the first place.
	 */
	if (!memblock_is_map_memory(addr))
		return;

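	/*
	 * If the hypervisor isn't up yet, remember forced non-cacheable
	 * pages so that they can be applied once pKVM is initialised;
	 * anything else is unexpected this early and is dropped with a
	 * one-time warning.
	 */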
	if (!is_pkvm_initialized()) {
		if (!WARN_ON_ONCE(!force_nc))
			pkvm_host_track_early_nc_mapping(addr);
		return;
	}

	err = kvm_call_hyp_nvhe(__pkvm_host_set_stage2_memattr, addr, force_nc);
	WARN_ON(err && err != -EAGAIN);
}
EXPORT_SYMBOL_GPL(pkvm_host_set_stage2_memattr);

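/*
 * Once pKVM is initialised, walk the regions recorded by
 * pkvm_host_track_early_nc_mapping() and force their stage-2 mappings to
 * be non-cacheable, one page at a time.
 */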
int __init pkvm_register_early_nc_mappings(void)
{
	int i;

	if (!is_pkvm_initialized())
		return 0;

	for (i = 0; i < PKVM_HOST_MAX_EARLY_NC_REGIONS; ++i) {
		struct pkvm_host_nc_region *reg = &pkvm_host_early_nc_regions[i];

		if (reg->start == reg->end)
			return 0;

		while (reg->start != reg->end) {
			int err;

			err = kvm_call_hyp_nvhe(__pkvm_host_set_stage2_memattr, reg->start, true);
			if (err)
				return err;

			reg->start += PAGE_SIZE;
		}
	}

	return 0;
}