// Copyright 2023 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::collections::BTreeMap;

use base::error;
use base::Error;
use base::Result;
use cros_fdt::Fdt;
use cros_fdt::FdtNode;
use libc::ENOENT;
use libc::ENOTSUP;
use libc::ENOTTY;
use snapshot::AnySnapshot;
use vm_memory::GuestAddress;
use vm_memory::MemoryRegionPurpose;

use super::GunyahVcpu;
use super::GunyahVm;
use crate::AArch64SysRegId;
use crate::Hypervisor;
use crate::PsciVersion;
use crate::VcpuAArch64;
use crate::VcpuRegAArch64;
use crate::VmAArch64;
use crate::PSCI_0_2;

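// First cell of a GIC interrupt specifier; 0 selects an SPI (shared peripheral interrupt).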
const GIC_FDT_IRQ_TYPE_SPI: u32 = 0;

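// Interrupt trigger flags, matching the devicetree interrupt binding values
// (IRQ_TYPE_* in dt-bindings/interrupt-controller/irq.h).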
const IRQ_TYPE_EDGE_RISING: u32 = 0x00000001;
const IRQ_TYPE_LEVEL_HIGH: u32 = 0x00000004;

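/// Adds a Gunyah "shm" vdevice node under `parent` for the guest memory region
/// starting at `guest_addr` (used below for static swiotlb regions).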
fn fdt_create_shm_device(
    parent: &mut FdtNode,
    index: u32,
    guest_addr: GuestAddress,
) -> cros_fdt::Result<()> {
    let shm_name = format!("shm-{:x}", index);
    let shm_node = parent.subnode_mut(&shm_name)?;
    shm_node.set_prop("vdevice-type", "shm")?;
    shm_node.set_prop("peer-default", ())?;
    shm_node.set_prop("dma_base", 0u64)?;
    let mem_node = shm_node.subnode_mut("memory")?;
    // The shm device is required for the RM to accept the swiotlb memparcel.
    // The memparcel is only used on android14-6.1; once android14-6.1 is EOL,
    // all of the fdt_create_shm_device() calls can be removed.
    mem_node.set_prop("optional", ())?;
    mem_node.set_prop("label", index)?;
    mem_node.set_prop("#address-cells", 2u32)?;
    mem_node.set_prop("base", guest_addr.offset())
}

impl VmAArch64 for GunyahVm {
    fn get_hypervisor(&self) -> &dyn Hypervisor {
        &self.gh
    }

    fn load_protected_vm_firmware(
        &mut self,
        fw_addr: GuestAddress,
        fw_max_size: u64,
    ) -> Result<()> {
        self.set_protected_vm_firmware_ipa(fw_addr, fw_max_size)
    }

    fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuAArch64>> {
        Ok(Box::new(GunyahVm::create_vcpu(self, id)?))
    }

    fn create_fdt(&self, fdt: &mut Fdt, phandles: &BTreeMap<&str, u32>) -> cros_fdt::Result<()> {
        let top_node = fdt.root_mut().subnode_mut("gunyah-vm-config")?;

        top_node.set_prop("image-name", "crosvm-vm")?;
        top_node.set_prop("os-type", "linux")?;

        let memory_node = top_node.subnode_mut("memory")?;
        memory_node.set_prop("#address-cells", 2u32)?;
        memory_node.set_prop("#size-cells", 2u32)?;

        let mut base_set = false;
        let mut firmware_set = false;
        for region in self.guest_mem.regions() {
            match region.options.purpose {
                MemoryRegionPurpose::GuestMemoryRegion => {
                    // Assume first GuestMemoryRegion contains the payload
                    if !base_set {
                        base_set = true;
                        memory_node.set_prop("base-address", region.guest_addr.offset())?;
                    }
                }
                MemoryRegionPurpose::ProtectedFirmwareRegion => {
                    if firmware_set {
                        // Should only have one protected firmware memory region.
                        error!("Multiple ProtectedFirmwareRegions unexpected.");
                        unreachable!()
                    }
                    firmware_set = true;
                    memory_node.set_prop("firmware-address", region.guest_addr.offset())?;
                }
                _ => {}
            }
        }

        let interrupts_node = top_node.subnode_mut("interrupts")?;
        interrupts_node.set_prop("config", *phandles.get("intc").unwrap())?;

        let vcpus_node = top_node.subnode_mut("vcpus")?;
        vcpus_node.set_prop("affinity", "proxy")?;

        let vdev_node = top_node.subnode_mut("vdevices")?;
        vdev_node.set_prop("generate", "/hypervisor")?;

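        // Describe each registered IRQ route as a Gunyah "doorbell" vdevice wired
        // up as an SPI with the route's trigger type.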
        for irq in self.routes.lock().iter() {
            let bell_name = format!("bell-{:x}", irq.irq);
            let bell_node = vdev_node.subnode_mut(&bell_name)?;
            bell_node.set_prop("vdevice-type", "doorbell")?;
            let path_name = format!("/hypervisor/bell-{:x}", irq.irq);
            bell_node.set_prop("generate", path_name)?;
            bell_node.set_prop("label", irq.irq)?;
            bell_node.set_prop("peer-default", ())?;
            bell_node.set_prop("source-can-clear", ())?;

            let interrupt_type = if irq.level {
                IRQ_TYPE_LEVEL_HIGH
            } else {
                IRQ_TYPE_EDGE_RISING
            };
            let interrupts = [GIC_FDT_IRQ_TYPE_SPI, irq.irq, interrupt_type];
            bell_node.set_prop("interrupts", &interrupts)?;
        }

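        // Only static swiotlb regions get an shm vdevice node; the other region
        // purposes are either described elsewhere in the config or not exposed here.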
        for region in self.guest_mem.regions() {
            let create_shm_node = match region.options.purpose {
                MemoryRegionPurpose::Bios => false,
                MemoryRegionPurpose::GuestMemoryRegion => false,
                // Described by the "firmware-address" property
                MemoryRegionPurpose::ProtectedFirmwareRegion => false,
                MemoryRegionPurpose::ReservedMemory => false,
                MemoryRegionPurpose::StaticSwiotlbRegion => true,
            };

            if create_shm_node {
                fdt_create_shm_device(
                    vdev_node,
                    region.index.try_into().unwrap(),
                    region.guest_addr,
                )?;
            }
        }

        Ok(())
    }

    fn init_arch(
        &self,
        payload_entry_address: GuestAddress,
        fdt_address: GuestAddress,
        fdt_size: usize,
    ) -> Result<()> {
        // The payload entry is the memory address where the kernel starts.
        // This memory region contains both the DTB and the kernel image,
        // so ensure they are located together.

        let (dtb_mapping, _, dtb_obj_offset) = self
            .guest_mem
            .find_region(fdt_address)
            .map_err(|_| Error::new(ENOENT))?;
        let (payload_mapping, payload_offset, payload_obj_offset) = self
            .guest_mem
            .find_region(payload_entry_address)
            .map_err(|_| Error::new(ENOENT))?;

        if !std::ptr::eq(dtb_mapping, payload_mapping) || dtb_obj_offset != payload_obj_offset {
            panic!("DTB and payload are not part of the same memory region.");
        }

        if self.vm_id.is_some() && self.pas_id.is_some() {
            // Gunyah will find the metadata about the Qualcomm Trusted VM in the
            // first few pages (decided at build time) of the primary payload region.
            // This metadata consists of the ELF header, which tells Gunyah where the
            // different ELF segments (kernel/DTB/ramdisk) are. Since the entire
            // primary payload is sent to Gunyah as a single memory parcel, Gunyah can
            // use the offsets from the ELF header to find the VM DTBOs.
            // Pass on the primary payload region start address and its size for
            // Qualcomm Trusted VMs.
            for region in self.guest_mem.regions() {
                if region.guest_addr.offset() == payload_entry_address.offset() {
                    self.set_vm_auth_type_to_qcom_trusted_vm(
                        payload_entry_address,
                        region.size.try_into().unwrap(),
                    );
                    break;
                }
            }
        }

        self.set_dtb_config(fdt_address, fdt_size)?;

        // For protected VMs without firmware, Gunyah itself sets the PC to the start
        // of the payload region (assumed to be the kernel start), so the payload
        // entry must be at offset 0.
        if self.hv_cfg.protection_type.isolates_memory()
            && !self.hv_cfg.protection_type.runs_firmware()
            && payload_offset != 0
        {
            panic!("Payload offset must be zero");
        }

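        // Program the boot PC to the payload entry point; older Gunyah drivers
        // without GH_VM_SET_BOOT_CONTEXT report ENOTTY, which is handled below.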
        if let Err(e) = self.set_boot_pc(payload_entry_address.offset()) {
            if e.errno() == ENOTTY {
                // The GH_VM_SET_BOOT_CONTEXT ioctl is not supported; for backward
                // compatibility, treat this as success when the payload offset is
                // zero.
                if payload_offset != 0 {
                    panic!("Payload offset must be zero");
                }
            } else {
                return Err(e);
            }
        }

        self.start()?;

        Ok(())
    }
}

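// Most vCPU register, debug, and snapshot operations are not available through
// the Gunyah interface, so the corresponding methods below are stubbed out.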
impl VcpuAArch64 for GunyahVcpu {
    fn init(&self, _features: &[crate::VcpuFeature]) -> Result<()> {
        Ok(())
    }

    fn init_pmu(&self, _irq: u64) -> Result<()> {
        Err(Error::new(ENOTSUP))
    }

    fn has_pvtime_support(&self) -> bool {
        false
    }

    fn init_pvtime(&self, _pvtime_ipa: u64) -> Result<()> {
        Err(Error::new(ENOTSUP))
    }

    fn set_one_reg(&self, _reg_id: VcpuRegAArch64, _data: u64) -> Result<()> {
        unimplemented!()
    }

    fn get_one_reg(&self, _reg_id: VcpuRegAArch64) -> Result<u64> {
        Err(Error::new(ENOTSUP))
    }

    fn set_vector_reg(&self, _reg_num: u8, _data: u128) -> Result<()> {
        unimplemented!()
    }

    fn get_vector_reg(&self, _reg_num: u8) -> Result<u128> {
        unimplemented!()
    }

    fn get_psci_version(&self) -> Result<PsciVersion> {
        Ok(PSCI_0_2)
    }

    fn set_guest_debug(&self, _addrs: &[GuestAddress], _enable_singlestep: bool) -> Result<()> {
        Err(Error::new(ENOTSUP))
    }

    fn get_max_hw_bps(&self) -> Result<usize> {
        Err(Error::new(ENOTSUP))
    }

    fn get_system_regs(&self) -> Result<BTreeMap<AArch64SysRegId, u64>> {
        Err(Error::new(ENOTSUP))
    }

    fn get_cache_info(&self) -> Result<BTreeMap<u8, u64>> {
        Err(Error::new(ENOTSUP))
    }

    fn set_cache_info(&self, _cache_info: BTreeMap<u8, u64>) -> Result<()> {
        Err(Error::new(ENOTSUP))
    }

    fn hypervisor_specific_snapshot(&self) -> anyhow::Result<AnySnapshot> {
        unimplemented!()
    }

    fn hypervisor_specific_restore(&self, _data: AnySnapshot) -> anyhow::Result<()> {
        unimplemented!()
    }
}