• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2022, The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //     http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 //! Low-level entry and exit points of pvmfw.
16 
17 use crate::config;
18 use crate::crypto;
19 use crate::fdt;
20 use crate::heap;
21 use crate::helpers;
22 use crate::helpers::RangeExt as _;
23 use crate::memory::{MemoryTracker, MEMORY};
24 use crate::mmu;
25 use crate::rand;
26 use core::arch::asm;
27 use core::mem::size_of;
28 use core::num::NonZeroUsize;
29 use core::ops::Range;
30 use core::slice;
31 use hyp::{get_hypervisor, HypervisorCap};
32 use log::debug;
33 use log::error;
34 use log::info;
35 use log::warn;
36 use log::LevelFilter;
37 use vmbase::{console, layout, logger, main, power::reboot};
38 
/// Reason why the pVM boot was aborted, causing `start()` to reboot instead of
/// jumping to the payload.
///
/// TODO(b/220071963): the specific reason is not yet propagated back to the
/// host (see the `Err` arm in `start()`).
#[derive(Debug, Clone)]
pub enum RebootReason {
    /// A malformed BCC was received.
    InvalidBcc,
    /// An invalid configuration was appended to pvmfw.
    InvalidConfig,
    /// An unexpected internal error happened.
    InternalError,
    /// The provided FDT was invalid.
    InvalidFdt,
    /// The provided payload was invalid.
    InvalidPayload,
    /// The provided ramdisk was invalid.
    InvalidRamdisk,
    /// Failed to verify the payload.
    PayloadVerificationError,
    /// DICE layering process failed.
    SecretDerivationError,
}
58 
// vmbase's main! macro registers `start` as the Rust entry point of the image.
main!(start);
60 
61 /// Entry point for pVM firmware.
start(fdt_address: u64, payload_start: u64, payload_size: u64, _arg3: u64)62 pub fn start(fdt_address: u64, payload_start: u64, payload_size: u64, _arg3: u64) {
63     // Limitations in this function:
64     // - can't access non-pvmfw memory (only statically-mapped memory)
65     // - can't access MMIO (therefore, no logging)
66 
67     match main_wrapper(fdt_address as usize, payload_start as usize, payload_size as usize) {
68         Ok((entry, bcc)) => jump_to_payload(fdt_address, entry.try_into().unwrap(), bcc),
69         Err(_) => reboot(), // TODO(b/220071963) propagate the reason back to the host.
70     }
71 
72     // if we reach this point and return, vmbase::entry::rust_entry() will call power::shutdown().
73 }
74 
/// Guest-memory slices, carved out and validated through the `MemoryTracker`,
/// that pvmfw hands over to the payload.
struct MemorySlices<'a> {
    /// Mutable view of the (sanitized) device tree blob.
    fdt: &'a mut libfdt::Fdt,
    /// The kernel image of the guest payload.
    kernel: &'a [u8],
    /// The ramdisk (initrd), if one was described by the device tree.
    ramdisk: Option<&'a [u8]>,
}
80 
impl<'a> MemorySlices<'a> {
    /// Maps and validates the FDT, then locates and maps the kernel and
    /// (optional) ramdisk regions it describes.
    ///
    /// Also resizes `memory` to the main-memory range declared by the DT and
    /// initializes the hypervisor shared-memory pool (dynamic or static,
    /// depending on hypervisor capabilities). With the "legacy" feature, falls
    /// back to the `kernel`/`kernel_size` arguments when the DT carries no
    /// kernel range.
    fn new(
        fdt: usize,
        kernel: usize,
        kernel_size: usize,
        memory: &mut MemoryTracker,
    ) -> Result<Self, RebootReason> {
        // SAFETY - SIZE_2MB is non-zero.
        const FDT_SIZE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(helpers::SIZE_2MB) };
        // TODO - Only map the FDT as read-only, until we modify it right before jump_to_payload()
        // e.g. by generating a DTBO for a template DT in main() and, on return, re-map DT as RW,
        // overwrite with the template DT and apply the DTBO.
        let range = memory.alloc_mut(fdt, FDT_SIZE).map_err(|e| {
            error!("Failed to allocate the FDT range: {e}");
            RebootReason::InternalError
        })?;

        // SAFETY - The tracker validated the range to be in main memory, mapped, and not overlap.
        let fdt = unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) };
        let fdt = libfdt::Fdt::from_mut_slice(fdt).map_err(|e| {
            error!("Failed to spawn the FDT wrapper: {e}");
            RebootReason::InvalidFdt
        })?;

        let info = fdt::sanitize_device_tree(fdt)?;
        debug!("Fdt passed validation!");

        // Restrict the tracker to the memory range the DT actually describes.
        let memory_range = info.memory_range;
        debug!("Resizing MemoryTracker to range {memory_range:#x?}");
        memory.shrink(&memory_range).map_err(|_| {
            error!("Failed to use memory range value from DT: {memory_range:#x?}");
            RebootReason::InvalidFdt
        })?;

        // Shared-memory pool: dynamic if the hypervisor supports MEM_SHARE at
        // runtime, otherwise a fixed pool taken from the DT swiotlb node.
        if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
            memory.init_dynamic_shared_pool().map_err(|e| {
                error!("Failed to initialize dynamically shared pool: {e}");
                RebootReason::InternalError
            })?;
        } else {
            let range = info.swiotlb_info.fixed_range().ok_or_else(|| {
                error!("Pre-shared pool range not specified in swiotlb node");
                RebootReason::InvalidFdt
            })?;

            memory.init_static_shared_pool(range).map_err(|e| {
                error!("Failed to initialize pre-shared pool {e}");
                RebootReason::InvalidFdt
            })?;
        }

        let kernel_range = if let Some(r) = info.kernel_range {
            memory.alloc_range(&r).map_err(|e| {
                error!("Failed to obtain the kernel range with DT range: {e}");
                RebootReason::InternalError
            })?
        } else if cfg!(feature = "legacy") {
            warn!("Failed to find the kernel range in the DT; falling back to legacy ABI");

            let kernel_size = NonZeroUsize::new(kernel_size).ok_or_else(|| {
                error!("Invalid kernel size: {kernel_size:#x}");
                RebootReason::InvalidPayload
            })?;

            memory.alloc(kernel, kernel_size).map_err(|e| {
                error!("Failed to obtain the kernel range with legacy range: {e}");
                RebootReason::InternalError
            })?
        } else {
            error!("Failed to locate the kernel from the DT");
            return Err(RebootReason::InvalidPayload);
        };

        // SAFETY - The tracker validated the range to be in main memory, mapped, and not overlap.
        let kernel =
            unsafe { slice::from_raw_parts(kernel_range.start as *const u8, kernel_range.len()) };

        let ramdisk = if let Some(r) = info.initrd_range {
            debug!("Located ramdisk at {r:?}");
            let r = memory.alloc_range(&r).map_err(|e| {
                error!("Failed to obtain the initrd range: {e}");
                RebootReason::InvalidRamdisk
            })?;

            // SAFETY - The region was validated by memory to be in main memory, mapped, and
            // not overlap.
            Some(unsafe { slice::from_raw_parts(r.start as *const u8, r.len()) })
        } else {
            info!("Couldn't locate the ramdisk from the device tree");
            None
        };

        Ok(Self { fdt, kernel, ramdisk })
    }
}
176 
/// Sets up the environment for main() and wraps its result for start().
///
/// Provide the abstractions necessary for start() to abort the pVM boot and for main() to run with
/// the assumption that its environment has been properly configured.
///
/// On success, returns the payload entry point (the kernel's load address) and
/// the range of the next-stage BCC to be handed over to the payload.
fn main_wrapper(
    fdt: usize,
    payload: usize,
    payload_size: usize,
) -> Result<(usize, Range<usize>), RebootReason> {
    // Limitations in this function:
    // - only access MMIO once (and while) it has been mapped and configured
    // - only perform logging once the logger has been initialized
    // - only access non-pvmfw memory once (and while) it has been mapped

    // SAFETY - This function should and will only be called once, here.
    unsafe { heap::init() };

    logger::init(LevelFilter::Info).map_err(|_| RebootReason::InternalError)?;

    // Use debug!() to avoid printing to the UART if we failed to configure it as only local
    // builds that have tweaked the logger::init() call will actually attempt to log the message.

    get_hypervisor().mmio_guard_init().map_err(|e| {
        debug!("{e}");
        RebootReason::InternalError
    })?;

    get_hypervisor().mmio_guard_map(console::BASE_ADDRESS).map_err(|e| {
        debug!("Failed to configure the UART: {e}");
        RebootReason::InternalError
    })?;

    crypto::init();

    // SAFETY - We only get the appended payload from here, once. It is mapped and the linker
    // script prevents it from overlapping with other objects.
    let appended_data = unsafe { get_appended_data_slice() };

    // Up to this point, we were using the built-in static (from .rodata) page tables.

    let mut page_table = mmu::PageTable::from_static_layout().map_err(|e| {
        error!("Failed to set up the dynamic page tables: {e}");
        RebootReason::InternalError
    })?;

    const CONSOLE_LEN: usize = 1; // vmbase::uart::Uart only uses one u8 register.
    let uart_range = console::BASE_ADDRESS..(console::BASE_ADDRESS + CONSOLE_LEN);
    page_table.map_device(&uart_range).map_err(|e| {
        error!("Failed to remap the UART as a dynamic page table entry: {e}");
        RebootReason::InternalError
    })?;

    // SAFETY - We only get the appended payload from here, once. It is statically mapped and the
    // linker script prevents it from overlapping with other objects.
    let mut appended = unsafe { AppendedPayload::new(appended_data) }.ok_or_else(|| {
        error!("No valid configuration found");
        RebootReason::InvalidConfig
    })?;

    let (bcc_slice, debug_policy) = appended.get_entries();

    debug!("Activating dynamic page table...");
    // SAFETY - page_table duplicates the static mappings for everything that the Rust code is
    // aware of so activating it shouldn't have any visible effect.
    unsafe { page_table.activate() };
    debug!("... Success!");

    // The dynamic page table is now owned by the global MemoryTracker.
    MEMORY.lock().replace(MemoryTracker::new(page_table));
    let slices = MemorySlices::new(fdt, payload, payload_size, MEMORY.lock().as_mut().unwrap())?;

    rand::init().map_err(|e| {
        error!("Failed to initialize rand: {e}");
        RebootReason::InternalError
    })?;

    // This wrapper allows main() to be blissfully ignorant of platform details.
    let next_bcc = crate::main(
        slices.fdt,
        slices.kernel,
        slices.ramdisk,
        bcc_slice,
        debug_policy,
        MEMORY.lock().as_mut().unwrap(),
    )?;

    // Scrub pvmfw's copy of the BCC before handing control to the payload.
    helpers::flushed_zeroize(bcc_slice);

    info!("Expecting a bug making MMIO_GUARD_UNMAP return NOT_SUPPORTED on success");
    MEMORY.lock().as_mut().unwrap().mmio_unmap_all().map_err(|e| {
        error!("Failed to unshare MMIO ranges: {e}");
        RebootReason::InternalError
    })?;
    // Call unshare_all_memory here (instead of relying on the dtor) while UART is still mapped.
    MEMORY.lock().as_mut().unwrap().unshare_all_memory();
    get_hypervisor().mmio_guard_unmap(console::BASE_ADDRESS).map_err(|e| {
        error!("Failed to unshare the UART: {e}");
        RebootReason::InternalError
    })?;
    // Drop the MemoryTracker (and its page table) before jumping to the payload.
    MEMORY.lock().take().unwrap();

    Ok((slices.kernel.as_ptr() as usize, next_bcc))
}
279 
/// Zeroes and cache-cleans pvmfw's scratch (.data/.bss, skipping the guest BCC)
/// and stack regions, resets EL1 state (SCTLR_EL1, TTBR0_EL1) and registers,
/// then branches to the payload with x0 holding the FDT address. Never returns.
fn jump_to_payload(fdt_address: u64, payload_start: u64, bcc: Range<usize>) -> ! {
    // The asm! zeroing loops store 16 bytes (one stp pair) per iteration, so
    // every region boundary must be 16-byte aligned.
    const ASM_STP_ALIGN: usize = size_of::<u64>() * 2;
    const SCTLR_EL1_RES1: u64 = (0b11 << 28) | (0b101 << 20) | (0b1 << 11);
    // Stage 1 instruction access cacheability is unaffected.
    const SCTLR_EL1_I: u64 = 0b1 << 12;
    // SETEND instruction disabled at EL0 in aarch32 mode.
    const SCTLR_EL1_SED: u64 = 0b1 << 8;
    // Various IT instructions are disabled at EL0 in aarch32 mode.
    const SCTLR_EL1_ITD: u64 = 0b1 << 7;

    const SCTLR_EL1_VAL: u64 = SCTLR_EL1_RES1 | SCTLR_EL1_ITD | SCTLR_EL1_SED | SCTLR_EL1_I;

    let scratch = layout::scratch_range();

    assert_ne!(scratch.len(), 0, "scratch memory is empty.");
    assert_eq!(scratch.start % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
    assert_eq!(scratch.end % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");

    // The BCC must lie within scratch so the two zeroing loops below can skip it.
    assert!(bcc.is_within(&scratch));
    assert_eq!(bcc.start % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");
    assert_eq!(bcc.end % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");

    let stack = mmu::stack_range();

    assert_ne!(stack.len(), 0, "stack region is empty.");
    assert_eq!(stack.start % ASM_STP_ALIGN, 0, "Misaligned stack region.");
    assert_eq!(stack.end % ASM_STP_ALIGN, 0, "Misaligned stack region.");

    // Zero all memory that could hold secrets and that can't be safely written to from Rust.
    // Disable the exception vector, caches and page table and then jump to the payload at the
    // given address, passing it the given FDT pointer.
    //
    // SAFETY - We're exiting pvmfw by passing the register values we need to a noreturn asm!().
    unsafe {
        asm!(
            "cmp {scratch}, {bcc}",
            "b.hs 1f",

            // Zero .data & .bss until BCC.
            "0: stp xzr, xzr, [{scratch}], 16",
            "cmp {scratch}, {bcc}",
            "b.lo 0b",

            "1:",
            // Skip BCC.
            "mov {scratch}, {bcc_end}",
            "cmp {scratch}, {scratch_end}",
            "b.hs 1f",

            // Keep zeroing .data & .bss.
            "0: stp xzr, xzr, [{scratch}], 16",
            "cmp {scratch}, {scratch_end}",
            "b.lo 0b",

            "1:",
            // Flush d-cache over .data & .bss (including BCC).
            "0: dc cvau, {cache_line}",
            "add {cache_line}, {cache_line}, {dcache_line_size}",
            "cmp {cache_line}, {scratch_end}",
            "b.lo 0b",

            "mov {cache_line}, {stack}",
            // Zero stack region.
            "0: stp xzr, xzr, [{stack}], 16",
            "cmp {stack}, {stack_end}",
            "b.lo 0b",

            // Flush d-cache over stack region.
            "0: dc cvau, {cache_line}",
            "add {cache_line}, {cache_line}, {dcache_line_size}",
            "cmp {cache_line}, {stack_end}",
            "b.lo 0b",

            "msr sctlr_el1, {sctlr_el1_val}",
            "isb",
            // Scrub every general-purpose register except x0 (FDT) and x30 (target).
            "mov x1, xzr",
            "mov x2, xzr",
            "mov x3, xzr",
            "mov x4, xzr",
            "mov x5, xzr",
            "mov x6, xzr",
            "mov x7, xzr",
            "mov x8, xzr",
            "mov x9, xzr",
            "mov x10, xzr",
            "mov x11, xzr",
            "mov x12, xzr",
            "mov x13, xzr",
            "mov x14, xzr",
            "mov x15, xzr",
            "mov x16, xzr",
            "mov x17, xzr",
            "mov x18, xzr",
            "mov x19, xzr",
            "mov x20, xzr",
            "mov x21, xzr",
            "mov x22, xzr",
            "mov x23, xzr",
            "mov x24, xzr",
            "mov x25, xzr",
            "mov x26, xzr",
            "mov x27, xzr",
            "mov x28, xzr",
            "mov x29, xzr",
            "msr ttbr0_el1, xzr",
            // Ensure that CMOs have completed before entering payload.
            "dsb nsh",
            "br x30",
            sctlr_el1_val = in(reg) SCTLR_EL1_VAL,
            bcc = in(reg) u64::try_from(bcc.start).unwrap(),
            bcc_end = in(reg) u64::try_from(bcc.end).unwrap(),
            cache_line = in(reg) u64::try_from(scratch.start).unwrap(),
            scratch = in(reg) u64::try_from(scratch.start).unwrap(),
            scratch_end = in(reg) u64::try_from(scratch.end).unwrap(),
            stack = in(reg) u64::try_from(stack.start).unwrap(),
            stack_end = in(reg) u64::try_from(stack.end).unwrap(),
            dcache_line_size = in(reg) u64::try_from(helpers::min_dcache_line_size()).unwrap(),
            in("x0") fdt_address,
            in("x30") payload_start,
            options(noreturn),
        );
    };
}
403 
/// Returns the region between the end of the pvmfw binary (4KiB-aligned) and
/// the next 2MiB boundary, where the appended data (config or legacy BCC) lives.
///
/// # Safety
///
/// Must only be called once: it hands out a `&'static mut` view of the region,
/// so a second call would create aliasing mutable references.
unsafe fn get_appended_data_slice() -> &'static mut [u8] {
    let base = helpers::align_up(layout::binary_end(), helpers::SIZE_4KB).unwrap();
    // pvmfw is contained in a 2MiB region so the payload can't be larger than the 2MiB alignment.
    let size = helpers::align_up(base, helpers::SIZE_2MB).unwrap() - base;

    // SAFETY: This region is mapped and the linker script prevents it from overlapping with other
    // objects.
    unsafe { slice::from_raw_parts_mut(base as *mut u8, size) }
}
413 
/// Result of probing the appended data for a configuration header.
enum AppendedConfigType {
    /// A configuration header was successfully parsed.
    Valid,
    /// Parsing failed for a reason other than a magic mismatch.
    Invalid,
    /// No configuration magic was found at the expected location.
    NotFound,
}
419 
/// Interpreted form of the data appended to the pvmfw binary.
enum AppendedPayload<'a> {
    /// Configuration data.
    Config(config::Config<'a>),
    /// Deprecated raw BCC, as used in Android T.
    LegacyBcc(&'a mut [u8]),
}
426 
impl<'a> AppendedPayload<'a> {
    /// Interprets `data` as a config blob or, with the "legacy" feature and no
    /// config magic present, as a raw 4KiB BCC. Returns `None` otherwise.
    ///
    /// SAFETY - 'data' should respect the alignment of config::Header.
    unsafe fn new(data: &'a mut [u8]) -> Option<Self> {
        // Safety: This fn has the same constraint as us.
        match unsafe { Self::guess_config_type(data) } {
            AppendedConfigType::Valid => {
                // Safety: This fn has the same constraint as us.
                let config = unsafe { config::Config::new(data) };
                // unwrap() can't panic: guess_config_type() just parsed it as Valid.
                Some(Self::Config(config.unwrap()))
            }
            AppendedConfigType::NotFound if cfg!(feature = "legacy") => {
                const BCC_SIZE: usize = helpers::SIZE_4KB;
                warn!("Assuming the appended data at {:?} to be a raw BCC", data.as_ptr());
                Some(Self::LegacyBcc(&mut data[..BCC_SIZE]))
            }
            _ => None,
        }
    }

    /// Probes `data` for a config header without retaining a borrow of it.
    ///
    /// SAFETY - 'data' should respect the alignment of config::Header.
    unsafe fn guess_config_type(data: &mut [u8]) -> AppendedConfigType {
        // This function is necessary to prevent the borrow checker from getting confused
        // about the ownership of data in new(); see https://users.rust-lang.org/t/78467.
        let addr = data.as_ptr();

        // Safety: This fn has the same constraint as us.
        match unsafe { config::Config::new(data) } {
            Err(config::Error::InvalidMagic) => {
                warn!("No configuration data found at {addr:?}");
                AppendedConfigType::NotFound
            }
            Err(e) => {
                error!("Invalid configuration data at {addr:?}: {e}");
                AppendedConfigType::Invalid
            }
            Ok(_) => AppendedConfigType::Valid,
        }
    }

    /// Returns the mutable BCC slice and, for config payloads, the optional
    /// debug-policy entry (always `None` for a legacy raw BCC).
    fn get_entries(&mut self) -> (&mut [u8], Option<&mut [u8]>) {
        match self {
            Self::Config(ref mut cfg) => cfg.get_entries(),
            Self::LegacyBcc(ref mut bcc) => (bcc, None),
        }
    }
}
473