/*
 * Copyright (c) 2025 Google Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

use log::error;
use spin::Once;

use hypervisor_backends::get_mem_sharer;
use hypervisor_backends::Error as HypError;
use hypervisor_backends::KvmError;

#[cfg(target_arch = "aarch64")]
use hypervisor_backends::get_mmio_guard;

#[cfg(target_arch = "aarch64")]
use rust_support::Error as LkError;

/// Result type with KVM errors.
pub type KvmResult<T> = Result<T, KvmError>;

/// The MMIO granule size used by the hypervisor.
#[cfg(target_arch = "aarch64")]
static MMIO_GRANULE: Once<Result<usize, LkError>> = Once::new();

#[cfg(target_arch = "aarch64")]
fn get_mmio_granule() -> Result<usize, LkError> {
    *MMIO_GRANULE.call_once(|| {
        let hypervisor = get_mmio_guard()
            .ok_or(LkError::ERR_NOT_SUPPORTED)
            .inspect_err(|_| error!("failed to get hypervisor"))?;

        let granule = hypervisor
            .granule()
            .inspect_err(|e| error!("failed to get granule: {e:?}"))
            .map_err(|_| LkError::ERR_NOT_SUPPORTED)?;

        if !granule.is_power_of_two() {
            error!("invalid memory protection granule");
            return Err(LkError::ERR_INVALID_ARGS);
        }

        Ok(granule)
    })
}

/// # Safety
///  - paddr must be a valid physical address
///  - paddr + size must be a valid physical address
///  - the caller must be aware that after the call, the memory in
///    [paddr .. paddr + size] is available for reading by the host.
#[cfg(target_arch = "aarch64")]
pub unsafe fn mmio_map_region(paddr: usize, size: usize) -> Result<(), LkError> {
    let Some(hypervisor) = get_mmio_guard() else {
        return Ok(());
    };
    let hypervisor_page_size = get_mmio_granule()?;

    if !paddr.is_multiple_of(hypervisor_page_size) {
        error!("paddr not aligned");
        return Err(LkError::ERR_INVALID_ARGS);
    }

    if !size.is_multiple_of(hypervisor_page_size) {
        error!("size ({size}) not aligned to page size ({hypervisor_page_size})");
        return Err(LkError::ERR_INVALID_ARGS);
    }

    for page in (paddr..paddr + size).step_by(hypervisor_page_size) {
        hypervisor.map(page).map_err(|err| {
            error!("failed to mmio guard map page 0x{page:x}: {err}");

            // unmap any previously mapped mmio pages on error;
            // if mapping fails on the first page, the half-open range below is empty
            for prev in (paddr..page).step_by(hypervisor_page_size) {
                // keep going even if we fail
                let _ = hypervisor.unmap(prev);
            }

            match err {
                HypError::KvmError(KvmError::NotSupported, _) => LkError::ERR_NOT_SUPPORTED,
                HypError::KvmError(KvmError::InvalidParameter, _) => LkError::ERR_INVALID_ARGS,
                HypError::KvmError(_, _) => LkError::ERR_GENERIC,
                _ => panic!("MMIO Guard map returned unexpected error: {err:?}"),
            }
        })?;
    }

    Ok(())
}
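
// A minimal usage sketch (illustrative, not part of this module): mapping a
// hypothetical device's MMIO window before touching its registers. UART_BASE
// and UART_SIZE are made-up names; a real caller would take the range from
// the platform's device description, and it must be granule-aligned.
//
//     // SAFETY: UART_BASE..UART_BASE + UART_SIZE is a valid MMIO range for
//     // this platform, and it is acceptable for the host to observe it.
//     unsafe { mmio_map_region(UART_BASE, UART_SIZE) }?;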

/// The memory-sharing granule size used by the hypervisor.
static GRANULE: Once<KvmResult<usize>> = Once::new();

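// The granule is queried from the hypervisor once and memoized via call_once;
// note that an error result is cached as well, so a failed probe is not retried.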
fn get_granule() -> KvmResult<usize> {
    *GRANULE.call_once(|| {
        let hypervisor = get_mem_sharer()
            .ok_or(KvmError::NotSupported)
            .inspect_err(|_| error!("failed to get hypervisor"))?;
        let granule = hypervisor
            .granule()
            .inspect_err(|e| error!("failed to get granule: {e:?}"))
            .map_err(|_| KvmError::NotSupported)?;
        if !granule.is_power_of_two() {
            error!("invalid memory protection granule");
            return Err(KvmError::InvalidParameter);
        }
        Ok(granule)
    })
}

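/// Shares the pages in the range [paddr .. paddr + size) with the host.
///
/// Returns Ok(()) without doing anything when no memory-sharing hypervisor is
/// present (i.e. not running in a protected VM). On failure, any pages already
/// shared by this call are unshared again before the error is returned.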
pub fn share_pages(paddr: usize, size: usize) -> KvmResult<()> {
    let hypervisor = match get_mem_sharer() {
        Some(h) => h,
        None => return Ok(()), // not in a protected vm
    };

    let hypervisor_page_size = get_granule()?;

    if !paddr.is_multiple_of(hypervisor_page_size) {
        error!("paddr not aligned");
        return Err(KvmError::InvalidParameter);
    }

    if !size.is_multiple_of(hypervisor_page_size) {
        error!("size ({size}) not aligned to page size ({hypervisor_page_size})");
        return Err(KvmError::InvalidParameter);
    }

    for page in (paddr..paddr + size).step_by(hypervisor_page_size) {
        hypervisor.share(page as u64).map_err(|err| {
            error!("failed to share page 0x{page:x}: {err}");

            // unshare any previously shared pages on error;
            // if sharing fails on the first page, the half-open range below is empty
            for prev in (paddr..page).step_by(hypervisor_page_size) {
                // keep going even if we fail
                let _ = hypervisor.unshare(prev as u64);
            }

            match err {
                HypError::KvmError(e, _) => e,
                _ => panic!("unexpected share error: {err:?}"),
            }
        })?;
    }

    Ok(())
}
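/// Unshares the pages in the range [paddr .. paddr + size), revoking the
/// host's access.
///
/// Returns Ok(()) without doing anything when no memory-sharing hypervisor is
/// present (i.e. not running in a protected VM).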
pub fn unshare_pages(paddr: usize, size: usize) -> KvmResult<()> {
    let hypervisor = match get_mem_sharer() {
        Some(h) => h,
        None => return Ok(()), // not in a protected vm
    };

    let hypervisor_page_size = get_granule()?;

    if !paddr.is_multiple_of(hypervisor_page_size) {
        error!("paddr not aligned");
        return Err(KvmError::InvalidParameter);
    }

    if !size.is_multiple_of(hypervisor_page_size) {
        error!("size ({size}) not aligned to page size ({hypervisor_page_size})");
        return Err(KvmError::InvalidParameter);
    }

    for page in (paddr..paddr + size).step_by(hypervisor_page_size) {
        hypervisor.unshare(page as u64).map_err(|err| {
            error!("failed to unshare page 0x{page:x}: {err:?}");

            match err {
                HypError::KvmError(e, _) => e,
                _ => panic!("unexpected unshare error: {err:?}"),
            }
        })?;
    }

    Ok(())
}
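
// A possible RAII wrapper over share_pages/unshare_pages, sketched here for
// illustration rather than as part of this module's API: the pages are
// unshared when the guard is dropped, even on an early return. `SharedPages`
// is a hypothetical name, not something this crate exposes.
//
//     pub struct SharedPages {
//         paddr: usize,
//         size: usize,
//     }
//
//     impl SharedPages {
//         /// Shares the range with the host; unshares it again on drop.
//         pub fn new(paddr: usize, size: usize) -> KvmResult<Self> {
//             share_pages(paddr, size)?;
//             Ok(Self { paddr, size })
//         }
//     }
//
//     impl Drop for SharedPages {
//         fn drop(&mut self) {
//             // Best effort: an unshare failure cannot be propagated from drop.
//             let _ = unshare_pages(self.paddr, self.size);
//         }
//     }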