/*
 * Copyright (c) 2025 Google Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#![no_std]

use crate::sys::ext_mem_client_id_t;
use crate::sys::ext_mem_get_obj_size;
use crate::sys::ext_mem_get_vmm_obj;
use crate::sys::ext_mem_obj_id_t;
use crate::sys::ext_mem_obj_set_match_tag;

use rust_support::lk_obj_ref_init;
use rust_support::mmu::PAGE_SIZE;
use rust_support::obj_ref;
use rust_support::status_t;
use rust_support::vmm::vmm_alloc_obj;
use rust_support::vmm::vmm_free_region;
use rust_support::vmm::vmm_get_kernel_aspace;
use rust_support::vmm::vmm_obj;
use rust_support::vmm::vmm_obj_del_ref;
use rust_support::Error;

use core::ffi::c_void;
use core::ffi::CStr;
use core::mem::zeroed;
use core::mem::ManuallyDrop;
use core::ptr::null_mut;
use core::ptr::NonNull;

mod sys {
    #![allow(non_camel_case_types)]
    #![allow(unused)]
    use rust_support::obj_ref;
    use rust_support::vmm::vmm_obj;
    include!(env!("BINDGEN_INC_FILE"));
}

/// An external memory object with ownership of its mapped memory.
///
/// Creating an `ExtMemObj` maps the object into the kernel address space, and
/// dropping it (either implicitly or explicitly via `unmap_obj`) unmaps the
/// memory.
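///
/// # Examples
///
/// A minimal sketch of the RAII behavior, where `map_demo_obj` stands in for
/// a hypothetical helper that calls [`ExtMemObj::map_obj_kernel`]:
///
/// ```ignore
/// {
///     let ext_mem = map_demo_obj()?;
///     // ... access the mapping through ext_mem.get_vaddr() ...
/// } // ext_mem is dropped here, unmapping the memory.
/// ```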
#[derive(Debug)]
pub struct ExtMemObj {
    vaddr: NonNull<c_void>,
    map_size: usize,
}

// SAFETY: Once created, the only modification allowed to the underlying
// vmm_obj is unmapping it, which requires exclusive access to the ExtMemObj.
// Ensuring that accesses to the mapped memory are synchronized is delegated to
// the safety requirements on the get_vaddr method.
unsafe impl Sync for ExtMemObj {}

// SAFETY: ExtMemObj may be sent between threads since any thread is allowed to
// unmap it, not just the thread that mapped it in. See the safety comment on
// the Sync impl for justification about calling get_vaddr from different
// threads.
unsafe impl Send for ExtMemObj {}

impl ExtMemObj {
    /// Maps an external memory object specified by `mem_obj_id` and
    /// `client_id` into the kernel address space.
    ///
    /// `size` can be specified to map a subset of the memory object starting
    /// at `offset`. If `size` is `None` the entire object is mapped in. It
    /// must always be a multiple of the page size.
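    ///
    /// # Examples
    ///
    /// A sketch of mapping an entire object, assuming hypothetical
    /// `CLIENT_ID`, `MEM_OBJ_ID`, and `TAG` values agreed upon with the
    /// memory donor, and default flags throughout:
    ///
    /// ```ignore
    /// let ext_mem = ExtMemObj::map_obj_kernel(
    ///     c"ext-mem-example", // mapping name; must be 'static
    ///     CLIENT_ID,          // hypothetical donor client ID
    ///     MEM_OBJ_ID,         // hypothetical shared memory object ID
    ///     TAG,                // match tag for the shared object
    ///     0,                  // offset: map from the start of the object
    ///     None,               // size: None maps the entire object
    ///     0,                  // align_log2: no extra alignment requirement
    ///     0,                  // vmm_flags
    ///     0,                  // arch_mmu_flags
    /// )?;
    /// ```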
85     #[allow(clippy::too_many_arguments)]
    pub fn map_obj_kernel(
        name: &'static CStr,
        client_id: ext_mem_client_id_t,
        mem_obj_id: ext_mem_obj_id_t,
        tag: u64,
        offset: usize,
        size: Option<usize>,
        align_log2: u8,
        vmm_flags: u32,
        arch_mmu_flags: u32,
    ) -> Result<Self, Error> {
        if let Some(sz) = size {
            assert!(sz % PAGE_SIZE as usize == 0);
        }

        let mut objp: *mut vmm_obj = null_mut();
        // SAFETY: obj_ref is a C type with two pointers which can be zeroed.
        // The obj_ref is initialized by lk_obj_ref_init before being used in
        // ext_mem_get_vmm_obj and does not move out of this function, so its
        // pointers to itself do not get invalidated.
        let mut tmp_obj_ref: obj_ref = unsafe { zeroed() };
        let tmp_obj_ref_ptr: *mut obj_ref = &raw mut tmp_obj_ref;
        // SAFETY: This takes a pointer to an obj_ref that will not move for
        // its entire lifetime.
        unsafe {
            lk_obj_ref_init(tmp_obj_ref_ptr);
        }

        // SAFETY: This takes vmm_obj and obj_ref pointers that are initialized
        // to valid values, and the error code is checked before using the
        // resulting vmm_obj. The function is thread-safe so there can be no
        // data race.
        let rc = unsafe {
            ext_mem_get_vmm_obj(
                client_id,
                mem_obj_id,
                tag,
                0, /* size hint */
                &raw mut objp,
                tmp_obj_ref_ptr,
            )
        };
        if rc < 0 {
            Error::from_lk(rc)?;
        }

        // SAFETY: objp points to a valid vmm_obj since ext_mem_get_vmm_obj
        // didn't return an error.
        unsafe {
            // match_tag must be set before mapping the object.
            ext_mem_obj_set_match_tag(objp, tag);
        }

        let aspace = vmm_get_kernel_aspace();
        let name = name.as_ptr();
        let map_size = match size {
            Some(sz) => sz,
            None => {
                // SAFETY: This function requires a pointer to a vmm_obj within
                // an ext_mem_obj, which is ensured by ext_mem_get_vmm_obj.
                unsafe { ext_mem_get_obj_size(objp) }
            }
        };
        let mut vaddr: *mut c_void = null_mut();
        // SAFETY: name is static and will outlive the allocation, and objp
        // points to a valid vmm_obj because it was initialized in
        // ext_mem_get_vmm_obj. The return code is checked before the resulting
        // vaddr is used.
        let rc = unsafe {
            vmm_alloc_obj(
                aspace,
                name,
                objp,
                offset,
                map_size,
                &raw mut vaddr,
                align_log2,
                vmm_flags,
                arch_mmu_flags,
            )
        };
        // SAFETY: vmm_alloc_obj took a reference to the vmm_obj, so dropping
        // the temporary reference created in this function will not drop the
        // vmm_obj. Arguments are valid because they were initialized in
        // ext_mem_get_vmm_obj and lk_obj_ref_init.
        unsafe { vmm_obj_del_ref(objp, tmp_obj_ref_ptr) };
        if rc < 0 {
            Error::from_lk(rc)?;
        }
        let vaddr = NonNull::new(vaddr).expect("vmm_alloc_obj returned a non-null pointer");
        Ok(Self { vaddr, map_size })
    }

    /// Get a pointer to the memory mapped into the kernel address space for
    /// the memory object.
    ///
    /// # Safety
    ///
    /// Since the mapping is shared memory, it may also be accessed from
    /// outside Trusty (e.g. by VMs), so the caller must ensure that accesses
    /// are synchronized. Furthermore, since ExtMemObj implements Sync, these
    /// pointers may be obtained from any thread in Trusty, so the caller must
    /// also ensure that accesses from different threads are synchronized.
    /// Finally, the caller must ensure that pointers are not accessed after
    /// the ExtMemObj is dropped, since that unmaps the memory from the kernel
    /// address space.
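    ///
    /// # Examples
    ///
    /// A sketch of reading the first byte of the mapping; that the access is
    /// synchronized with every other user of the shared memory is an
    /// assumption of this example:
    ///
    /// ```ignore
    /// // SAFETY: accesses are synchronized with all other users of the
    /// // mapping (assumed here) and ext_mem has not been dropped yet.
    /// let first_byte = unsafe { ext_mem.get_vaddr().cast::<u8>().as_ptr().read_volatile() };
    /// ```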
    pub unsafe fn get_vaddr(&self) -> NonNull<c_void> {
        self.vaddr
    }

    /// Get the size mapped for the external memory object.
    pub fn get_size(&self) -> usize {
        self.map_size
    }

    /// Unmaps the external memory object and returns whether the operation
    /// was successful or not.
    ///
    /// On failure this returns a tuple with an ExtMemObj for the same mapping
    /// and the Error corresponding to the non-zero status_t returned by
    /// vmm_free_region.
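    ///
    /// # Examples
    ///
    /// A sketch of explicit unmapping with a single retry; the retry policy
    /// is illustrative rather than something this API prescribes:
    ///
    /// ```ignore
    /// if let Err((ext_mem, _err)) = ext_mem.unmap_obj() {
    ///     // The mapping is still intact here, so a retry is safe. If the
    ///     // retry also fails, the returned ExtMemObj is dropped, which makes
    ///     // one last attempt and leaks the mapping on failure.
    ///     let _ = ext_mem.unmap_obj();
    /// }
    /// ```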
    pub fn unmap_obj(self) -> Result<(), (Self, Error)> {
        let aspace = vmm_get_kernel_aspace();
        // Skip dropping self to avoid calling vmm_free_region multiple times.
        let extmem = ManuallyDrop::new(self);
        // SAFETY: This deletes the obj_ref created by vmm_alloc_obj, unmapping
        // the external memory object from the kernel address space.
        let rc = unsafe { vmm_free_region(aspace, extmem.vaddr.as_ptr() as usize) };
        if rc != 0 {
            return Error::from_lk(rc).map_err(|e| (ManuallyDrop::into_inner(extmem), e));
        }
        Ok(())
    }
}

impl Drop for ExtMemObj {
    /// Unmaps the external memory object.
    ///
    /// On failure this leaks the mapping without giving the caller a chance
    /// to retry.
    fn drop(&mut self) {
        let aspace = vmm_get_kernel_aspace();
        // SAFETY: This deletes the obj_ref created by vmm_alloc_obj, unmapping
        // the external memory object from the kernel address space.
        let _rc: status_t = unsafe { vmm_free_region(aspace, self.vaddr.as_ptr() as usize) };
    }
}
230