1 // Copyright 2018 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 use std::env;
6 use std::fmt::{self, Display};
7 use std::fs;
8 use std::ops::BitOrAssign;
9 use std::os::unix::io::RawFd;
10 use std::path::PathBuf;
11 use std::sync::atomic::{AtomicUsize, Ordering};
12 use std::sync::Arc;
13 use std::thread;
14
15 use sys_util::{error, EventFd, GuestMemory, GuestMemoryError, PollContext, PollToken};
16 use tpm2;
17
18 use super::{DescriptorChain, Queue, VirtioDevice, INTERRUPT_STATUS_USED_RING, TYPE_TPM};
19
// A single queue of size 2. The guest kernel driver will enqueue a single
// descriptor chain containing one command buffer and one response buffer at a
// time.
const QUEUE_SIZE: u16 = 2;
// One entry per virtqueue; this device exposes exactly one queue (returned
// from `queue_max_sizes`).
const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE];

// Maximum command or response message size permitted by this device
// implementation. Named to match the equivalent constant in Linux's tpm.h.
// There is no hard requirement that the value is the same but it makes sense.
const TPM_BUFSIZE: usize = 4096;
30
// State owned by the device's worker thread; `run` consumes it and loops
// until killed.
struct Worker {
    // The single virtqueue used to exchange command/response buffers.
    queue: Queue,
    // Guest memory, used to read command bytes and write response bytes.
    mem: GuestMemory,
    // Interrupt status bits shared with the rest of the device model; the
    // used-ring bit is OR'd in before signaling `interrupt_evt`.
    interrupt_status: Arc<AtomicUsize>,
    // Signaled by the guest when new descriptors are available on `queue`.
    queue_evt: EventFd,
    // Signaled by `Tpm::drop` to ask the worker thread to exit.
    kill_evt: EventFd,
    // Written to inject an interrupt into the guest.
    interrupt_evt: EventFd,
    // Readable when the interrupt should be re-checked; if `interrupt_status`
    // is still nonzero, the interrupt is re-asserted.
    interrupt_resample_evt: EventFd,
    device: Device,
}
41
// Wraps the TPM simulator backend that actually executes command buffers.
struct Device {
    simulator: tpm2::Simulator,
}
45
46 // Checks that the input descriptor chain holds a read-only descriptor followed
47 // by a write-only descriptor, as required of the guest's virtio tpm driver.
48 //
49 // Returns those descriptors as a tuple: `(read_desc, write_desc)`.
two_input_descriptors(desc: DescriptorChain) -> Result<(DescriptorChain, DescriptorChain)>50 fn two_input_descriptors(desc: DescriptorChain) -> Result<(DescriptorChain, DescriptorChain)> {
51 let read_desc = desc;
52 if !read_desc.is_read_only() {
53 return Err(Error::ExpectedReadOnly);
54 }
55
56 let write_desc = match read_desc.next_descriptor() {
57 Some(desc) => desc,
58 None => return Err(Error::ExpectedSecondBuffer),
59 };
60
61 if !write_desc.is_write_only() {
62 return Err(Error::ExpectedWriteOnly);
63 }
64
65 Ok((read_desc, write_desc))
66 }
67
68 impl Device {
perform_work(&mut self, mem: &GuestMemory, desc: DescriptorChain) -> Result<u32>69 fn perform_work(&mut self, mem: &GuestMemory, desc: DescriptorChain) -> Result<u32> {
70 let (read_desc, write_desc) = two_input_descriptors(desc)?;
71
72 if read_desc.len > TPM_BUFSIZE as u32 {
73 return Err(Error::CommandTooLong {
74 size: read_desc.len as usize,
75 });
76 }
77
78 let mut command = vec![0u8; read_desc.len as usize];
79 mem.read_exact_at_addr(&mut command, read_desc.addr)
80 .map_err(Error::Read)?;
81
82 let response = self.simulator.execute_command(&command);
83
84 if response.len() > TPM_BUFSIZE {
85 return Err(Error::ResponseTooLong {
86 size: response.len(),
87 });
88 }
89
90 if response.len() > write_desc.len as usize {
91 return Err(Error::BufferTooSmall {
92 size: write_desc.len as usize,
93 required: response.len(),
94 });
95 }
96
97 mem.write_all_at_addr(&response, write_desc.addr)
98 .map_err(Error::Write)?;
99
100 Ok(response.len() as u32)
101 }
102 }
103
impl Worker {
    // Pops at most one descriptor chain from the queue and services it.
    // Returns whether the used ring was updated (and thus an interrupt is
    // needed).
    fn process_queue(&mut self) -> NeedsInterrupt {
        let avail_desc = match self.queue.pop(&self.mem) {
            Some(avail_desc) => avail_desc,
            None => return NeedsInterrupt::No,
        };

        let index = avail_desc.index;

        // On failure, log the error and report a used length of 0 so the
        // guest still sees the request complete (with an empty response).
        let len = match self.device.perform_work(&self.mem, avail_desc) {
            Ok(len) => len,
            Err(err) => {
                error!("{}", err);
                0
            }
        };

        self.queue.add_used(&self.mem, index, len);
        NeedsInterrupt::Yes
    }

    // Sets the used-ring bit in the shared interrupt status and raises the
    // interrupt. The write result is ignored; there is no recovery path.
    fn signal_used_queue(&self) {
        self.interrupt_status
            .fetch_or(INTERRUPT_STATUS_USED_RING as usize, Ordering::SeqCst);
        let _ = self.interrupt_evt.write(1);
    }

    // Worker thread main loop: waits on the queue, resample, and kill
    // EventFds and dispatches until killed or a fatal poll error occurs.
    fn run(mut self) {
        #[derive(PollToken, Debug)]
        enum Token {
            // A request is ready on the queue.
            QueueAvailable,
            // Check if any interrupts need to be re-asserted.
            InterruptResample,
            // The parent thread requested an exit.
            Kill,
        }

        // Register all three EventFds up front; if any registration fails the
        // worker cannot make progress, so it logs and exits.
        let poll_ctx = match PollContext::new()
            .and_then(|pc| pc.add(&self.queue_evt, Token::QueueAvailable).and(Ok(pc)))
            .and_then(|pc| {
                pc.add(&self.interrupt_resample_evt, Token::InterruptResample)
                    .and(Ok(pc))
            })
            .and_then(|pc| pc.add(&self.kill_evt, Token::Kill).and(Ok(pc)))
        {
            Ok(pc) => pc,
            Err(e) => {
                error!("vtpm failed creating PollContext: {}", e);
                return;
            }
        };

        'poll: loop {
            let events = match poll_ctx.wait() {
                Ok(v) => v,
                Err(e) => {
                    error!("vtpm failed polling for events: {}", e);
                    break;
                }
            };

            // Accumulate interrupt need across all events in this batch and
            // signal at most once per wakeup, after the dispatch loop.
            let mut needs_interrupt = NeedsInterrupt::No;
            for event in events.iter_readable() {
                match event.token() {
                    Token::QueueAvailable => {
                        // Drain the EventFd counter before servicing the
                        // queue; a failed read is treated as fatal.
                        if let Err(e) = self.queue_evt.read() {
                            error!("vtpm failed reading queue EventFd: {}", e);
                            break 'poll;
                        }
                        needs_interrupt |= self.process_queue();
                    }
                    Token::InterruptResample => {
                        let _ = self.interrupt_resample_evt.read();
                        // Re-assert the interrupt if any status bits remain.
                        if self.interrupt_status.load(Ordering::SeqCst) != 0 {
                            let _ = self.interrupt_evt.write(1);
                        }
                    }
                    Token::Kill => break 'poll,
                }
            }
            if needs_interrupt == NeedsInterrupt::Yes {
                self.signal_used_queue();
            }
        }
    }
}
191
/// Virtio vTPM device.
pub struct Tpm {
    // Directory in which the TPM simulator keeps its persistent state; the
    // process changes into it during `activate`.
    storage: PathBuf,
    // Used to tell the worker thread to exit; populated by `activate` and
    // written from `Drop`.
    kill_evt: Option<EventFd>,
}
197
impl Tpm {
    /// Creates a new virtio TPM device whose simulator state lives under
    /// `storage`. No worker or simulator is started until `activate`.
    pub fn new(storage: PathBuf) -> Tpm {
        Tpm {
            storage,
            kill_evt: None,
        }
    }
}
206
impl Drop for Tpm {
    fn drop(&mut self) {
        // Ask the worker thread (if `activate` started one) to exit. The
        // write result is ignored; nothing useful can be done on failure.
        if let Some(kill_evt) = self.kill_evt.take() {
            let _ = kill_evt.write(1);
        }
    }
}
214
impl VirtioDevice for Tpm {
    // No file descriptors are held before activation, so none need to be
    // kept across the sandbox fork.
    fn keep_fds(&self) -> Vec<RawFd> {
        Vec::new()
    }

    fn device_type(&self) -> u32 {
        TYPE_TPM
    }

    fn queue_max_sizes(&self) -> &[u16] {
        QUEUE_SIZES
    }

    // Sets up the simulator and spawns the worker thread. Errors at any step
    // are logged and abort activation, leaving the device inert.
    fn activate(
        &mut self,
        mem: GuestMemory,
        interrupt_evt: EventFd,
        interrupt_resample_evt: EventFd,
        interrupt_status: Arc<AtomicUsize>,
        mut queues: Vec<Queue>,
        mut queue_evts: Vec<EventFd>,
    ) {
        // This device uses exactly one queue; anything else is a setup bug.
        if queues.len() != 1 || queue_evts.len() != 1 {
            return;
        }
        let queue = queues.remove(0);
        let queue_evt = queue_evts.remove(0);

        // The simulator stores its state in the current directory, so create
        // and enter the storage directory first.
        // NOTE(review): `set_current_dir` changes the cwd process-wide;
        // presumably this runs in the device's own jailed process — confirm.
        if let Err(err) = fs::create_dir_all(&self.storage) {
            error!("vtpm failed to create directory for simulator: {}", err);
            return;
        }
        if let Err(err) = env::set_current_dir(&self.storage) {
            error!("vtpm failed to change into simulator directory: {}", err);
            return;
        }
        let simulator = tpm2::Simulator::singleton_in_current_directory();

        // Create a pair of handles to the same EventFd: one kept here to
        // signal shutdown from `Drop`, one given to the worker to wait on.
        let (self_kill_evt, kill_evt) = match EventFd::new().and_then(|e| Ok((e.try_clone()?, e))) {
            Ok(v) => v,
            Err(err) => {
                error!("vtpm failed to create kill EventFd pair: {}", err);
                return;
            }
        };
        self.kill_evt = Some(self_kill_evt);

        let worker = Worker {
            queue,
            mem,
            interrupt_status,
            queue_evt,
            interrupt_evt,
            interrupt_resample_evt,
            kill_evt,
            device: Device { simulator },
        };

        // The worker runs detached; the join handle is intentionally dropped.
        let worker_result = thread::Builder::new()
            .name("virtio_tpm".to_string())
            .spawn(|| worker.run());

        if let Err(e) = worker_result {
            error!("vtpm failed to spawn virtio_tpm worker: {}", e);
            return;
        }
    }
}
283
/// Whether queue processing updated the used ring, meaning the guest must be
/// notified with an interrupt.
// Derive the full standard set for a trivial two-variant flag: `Copy`/`Clone`
// (it is a plain discriminant), `Eq` (PartialEq alone is incomplete for a
// type with total equality), and `Debug` (so it can appear in logs/asserts).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum NeedsInterrupt {
    Yes,
    No,
}

impl BitOrAssign for NeedsInterrupt {
    // Accumulates interrupt requests across events: once any operand is
    // `Yes`, the accumulator stays `Yes`.
    fn bitor_assign(&mut self, rhs: NeedsInterrupt) {
        if rhs == NeedsInterrupt::Yes {
            *self = NeedsInterrupt::Yes;
        }
    }
}
297
298 type Result<T> = std::result::Result<T, Error>;
299
300 enum Error {
301 ExpectedReadOnly,
302 ExpectedSecondBuffer,
303 ExpectedWriteOnly,
304 CommandTooLong { size: usize },
305 Read(GuestMemoryError),
306 ResponseTooLong { size: usize },
307 BufferTooSmall { size: usize, required: usize },
308 Write(GuestMemoryError),
309 }
310
311 impl Display for Error {
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result312 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
313 use self::Error::*;
314
315 match self {
316 ExpectedReadOnly => write!(f, "vtpm expected first descriptor to be read-only"),
317 ExpectedSecondBuffer => write!(f, "vtpm expected a second descriptor"),
318 ExpectedWriteOnly => write!(f, "vtpm expected second descriptor to be write-only"),
319 CommandTooLong { size } => write!(
320 f,
321 "vtpm command is too long: {} > {} bytes",
322 size, TPM_BUFSIZE
323 ),
324 Read(e) => write!(f, "vtpm failed to read from guest memory: {}", e),
325 ResponseTooLong { size } => write!(
326 f,
327 "vtpm simulator generated a response that is unexpectedly long: {} > {} bytes",
328 size, TPM_BUFSIZE
329 ),
330 BufferTooSmall { size, required } => write!(
331 f,
332 "vtpm response buffer is too small: {} < {} bytes",
333 size, required
334 ),
335 Write(e) => write!(f, "vtpm failed to write to guest memory: {}", e),
336 }
337 }
338 }
339