• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2018 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 use std::collections::BTreeMap;
6 use std::io;
7 use std::io::Read;
8 use std::io::Write;
9 use std::ops::BitOrAssign;
10 
11 use anyhow::anyhow;
12 use anyhow::Context;
13 use base::error;
14 use base::Event;
15 use base::EventToken;
16 use base::RawDescriptor;
17 use base::WaitContext;
18 use base::WorkerThread;
19 use remain::sorted;
20 use thiserror::Error;
21 use vm_memory::GuestMemory;
22 
23 use super::DescriptorChain;
24 use super::DeviceType;
25 use super::Interrupt;
26 use super::Queue;
27 use super::VirtioDevice;
28 
// A single queue of size 2. The guest kernel driver will enqueue a single
// descriptor chain containing one command buffer and one response buffer at a
// time.
const QUEUE_SIZE: u16 = 2;
// One size entry per virtqueue; this device exposes exactly one queue.
const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE];

// Maximum command or response message size permitted by this device
// implementation. Named to match the equivalent constant in Linux's tpm.h.
// There is no hard requirement that the value is the same but it makes sense.
const TPM_BUFSIZE: usize = 4096;
39 
/// State owned by the vTPM worker thread: the virtqueue it services, the
/// interrupt used to notify the guest, and the backend that executes TPM
/// commands.
struct Worker {
    interrupt: Interrupt,
    queue: Queue,
    backend: Box<dyn TpmBackend>,
}
45 
/// A backend that synchronously executes TPM command buffers on behalf of the
/// virtio device.
pub trait TpmBackend: Send {
    /// Executes `command` and returns the response buffer.
    ///
    /// The returned slice borrows from `self`, so it is only valid until the
    /// next call to `execute_command`.
    fn execute_command<'a>(&'a mut self, command: &[u8]) -> &'a [u8];
}
49 
impl Worker {
    /// Reads one command from `desc`, runs it through the backend, and writes
    /// the response back into the chain's writable buffer.
    ///
    /// Returns the number of response bytes written, or an error if the
    /// command or response exceeds `TPM_BUFSIZE`, the guest-provided response
    /// buffer is too small, or guest memory I/O fails.
    fn perform_work(&mut self, desc: &mut DescriptorChain) -> Result<u32> {
        let available_bytes = desc.reader.available_bytes();
        // Reject commands larger than this implementation's buffer cap
        // (matches the Linux driver's tpm.h limit).
        if available_bytes > TPM_BUFSIZE {
            return Err(Error::CommandTooLong {
                size: available_bytes,
            });
        }

        let mut command = vec![0u8; available_bytes];
        desc.reader.read_exact(&mut command).map_err(Error::Read)?;

        let response = self.backend.execute_command(&command);

        if response.len() > TPM_BUFSIZE {
            return Err(Error::ResponseTooLong {
                size: response.len(),
            });
        }

        let writer_len = desc.writer.available_bytes();
        // The guest must have supplied a writable buffer large enough to hold
        // the entire response.
        if response.len() > writer_len {
            return Err(Error::BufferTooSmall {
                size: writer_len,
                required: response.len(),
            });
        }

        desc.writer.write_all(response).map_err(Error::Write)?;

        Ok(desc.writer.bytes_written() as u32)
    }

    /// Drains every available descriptor chain from the queue.
    ///
    /// A failed request is logged and returned to the guest as a zero-length
    /// used entry rather than aborting the worker. Returns whether any chain
    /// was consumed (and hence the guest needs an interrupt).
    fn process_queue(&mut self) -> NeedsInterrupt {
        let mut needs_interrupt = NeedsInterrupt::No;
        while let Some(mut avail_desc) = self.queue.pop() {
            let len = match self.perform_work(&mut avail_desc) {
                Ok(len) => len,
                Err(err) => {
                    error!("{}", err);
                    0
                }
            };

            self.queue.add_used(avail_desc, len);
            needs_interrupt = NeedsInterrupt::Yes;
        }

        needs_interrupt
    }

    /// Worker-thread main loop: waits on the queue, interrupt-resample, and
    /// kill events until asked to exit or an unrecoverable wait/read error
    /// occurs.
    fn run(mut self, kill_evt: Event) {
        #[derive(EventToken, Debug)]
        enum Token {
            // A request is ready on the queue.
            QueueAvailable,
            // Check if any interrupts need to be re-asserted.
            InterruptResample,
            // The parent thread requested an exit.
            Kill,
        }

        let wait_ctx = match WaitContext::build_with(&[
            (self.queue.event(), Token::QueueAvailable),
            (&kill_evt, Token::Kill),
        ])
        .and_then(|wc| {
            // A resample event only exists for some interrupt configurations,
            // so it is registered conditionally.
            if let Some(resample_evt) = self.interrupt.get_resample_evt() {
                wc.add(resample_evt, Token::InterruptResample)?;
            }
            Ok(wc)
        }) {
            Ok(pc) => pc,
            Err(e) => {
                error!("vtpm failed creating WaitContext: {}", e);
                return;
            }
        };

        'wait: loop {
            let events = match wait_ctx.wait() {
                Ok(v) => v,
                Err(e) => {
                    error!("vtpm failed waiting for events: {}", e);
                    break;
                }
            };

            let mut needs_interrupt = NeedsInterrupt::No;
            for event in events.iter().filter(|e| e.is_readable) {
                match event.token {
                    Token::QueueAvailable => {
                        // Consume the queue notification before draining the
                        // queue so a subsequent kick is not lost.
                        if let Err(e) = self.queue.event().wait() {
                            error!("vtpm failed reading queue Event: {}", e);
                            break 'wait;
                        }
                        needs_interrupt |= self.process_queue();
                    }
                    Token::InterruptResample => {
                        self.interrupt.interrupt_resample();
                    }
                    Token::Kill => break 'wait,
                }
            }
            // Signal the guest once per wakeup, after all ready events are
            // handled.
            if needs_interrupt == NeedsInterrupt::Yes {
                self.queue.trigger_interrupt(&self.interrupt);
            }
        }
    }
}
160 
/// Virtio vTPM device.
pub struct Tpm {
    // Backend handed off to the worker on activation; `None` after
    // `activate` has consumed it.
    backend: Option<Box<dyn TpmBackend>>,
    // Handle to the running worker thread, if the device has been activated.
    worker_thread: Option<WorkerThread<()>>,
    // Virtio feature bits advertised to the guest.
    features: u64,
}
167 
168 impl Tpm {
new(backend: Box<dyn TpmBackend>, base_features: u64) -> Tpm169     pub fn new(backend: Box<dyn TpmBackend>, base_features: u64) -> Tpm {
170         Tpm {
171             backend: Some(backend),
172             worker_thread: None,
173             features: base_features,
174         }
175     }
176 }
177 
impl VirtioDevice for Tpm {
    fn keep_rds(&self) -> Vec<RawDescriptor> {
        // This device holds no raw descriptors that must survive sandboxing.
        Vec::new()
    }

    fn device_type(&self) -> DeviceType {
        DeviceType::Tpm
    }

    fn queue_max_sizes(&self) -> &[u16] {
        QUEUE_SIZES
    }

    fn features(&self) -> u64 {
        self.features
    }

    /// Spawns the worker thread servicing the device's single queue.
    ///
    /// Fails if the transport supplied a number of queues other than one, or
    /// if the device was already activated (the backend has been consumed).
    fn activate(
        &mut self,
        _mem: GuestMemory,
        interrupt: Interrupt,
        mut queues: BTreeMap<usize, Queue>,
    ) -> anyhow::Result<()> {
        if queues.len() != 1 {
            return Err(anyhow!("expected 1 queue, got {}", queues.len()));
        }
        let queue = queues.pop_first().unwrap().1;

        // `take` moves the backend into the worker exactly once; a second
        // activation attempt fails here.
        let backend = self.backend.take().context("no backend in vtpm")?;

        let worker = Worker {
            interrupt,
            queue,
            backend,
        };

        self.worker_thread = Some(WorkerThread::start("v_tpm", |kill_evt| {
            worker.run(kill_evt)
        }));

        Ok(())
    }
}
221 
/// Whether the guest should be notified after a queue-servicing pass.
#[derive(PartialEq, Eq)]
enum NeedsInterrupt {
    Yes,
    No,
}

impl BitOrAssign for NeedsInterrupt {
    /// `Yes` is sticky: once any pass produced work, `x |= ...` never resets
    /// the flag back to `No`.
    fn bitor_assign(&mut self, rhs: NeedsInterrupt) {
        match rhs {
            NeedsInterrupt::Yes => *self = NeedsInterrupt::Yes,
            NeedsInterrupt::No => (),
        }
    }
}
235 
/// Result type used by the descriptor-processing path of this device.
type Result<T> = std::result::Result<T, Error>;

// NOTE: #[sorted] (from the `remain` crate) enforces alphabetical variant
// order at compile time.
#[sorted]
#[derive(Error, Debug)]
enum Error {
    // The guest's writable buffer cannot hold the backend's response.
    #[error("vtpm response buffer is too small: {size} < {required} bytes")]
    BufferTooSmall { size: usize, required: usize },
    // The guest sent a command exceeding TPM_BUFSIZE.
    #[error("vtpm command is too long: {size} > {} bytes", TPM_BUFSIZE)]
    CommandTooLong { size: usize },
    #[error("vtpm failed to read from guest memory: {0}")]
    Read(io::Error),
    // The backend produced a response exceeding TPM_BUFSIZE.
    #[error(
        "vtpm simulator generated a response that is unexpectedly long: {size} > {} bytes",
        TPM_BUFSIZE
    )]
    ResponseTooLong { size: usize },
    #[error("vtpm failed to write to guest memory: {0}")]
    Write(io::Error),
}
255