// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! VirtioDevice implementation for the VMM side of a vhost-user connection.
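//!
//! A minimal construction sketch (not compiled by rustdoc; it assumes a Unix host, where
//! `vmm_vhost::SystemStream` is a `UnixStream`, and a hypothetical socket path for a block
//! device backend):
//!
//! ```ignore
//! use std::os::unix::net::UnixStream;
//!
//! let connection = UnixStream::connect("/run/vhost-user-blk.sock")?;
//! let frontend = VhostUserFrontend::new(
//!     DeviceType::Block,
//!     base_features, // e.g. 1 << VIRTIO_F_VERSION_1, from the VMM's configuration
//!     connection,
//!     None, // max_queue_size: default to Queue::MAX_SIZE
//!     None, // pci_address: let the bus assign one
//! )?;
//! ```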

mod error;
mod fs;
mod handler;
mod sys;
mod worker;

use std::cell::RefCell;
use std::collections::BTreeMap;
use std::sync::Arc;

use anyhow::Context;
use base::error;
use base::trace;
use base::AsRawDescriptor;
use base::Event;
use base::RawDescriptor;
use base::WorkerThread;
use serde_json::Value;
use sync::Mutex;
use vm_memory::GuestMemory;
use vmm_vhost::message::VhostUserConfigFlags;
use vmm_vhost::message::VhostUserProtocolFeatures;
use vmm_vhost::BackendClient;
use vmm_vhost::VhostUserMemoryRegionInfo;
use vmm_vhost::VringConfigData;
use vmm_vhost::VHOST_USER_F_PROTOCOL_FEATURES;

use crate::pci::MsixConfig;
use crate::virtio::copy_config;
use crate::virtio::device_constants::VIRTIO_DEVICE_TYPE_SPECIFIC_FEATURES_MASK;
use crate::virtio::vhost_user_frontend::error::Error;
use crate::virtio::vhost_user_frontend::error::Result;
use crate::virtio::vhost_user_frontend::handler::BackendReqHandler;
use crate::virtio::vhost_user_frontend::handler::BackendReqHandlerImpl;
use crate::virtio::vhost_user_frontend::sys::create_backend_req_handler;
use crate::virtio::vhost_user_frontend::worker::Worker;
use crate::virtio::DeviceType;
use crate::virtio::Interrupt;
use crate::virtio::Queue;
use crate::virtio::QueueConfig;
use crate::virtio::SharedMemoryMapper;
use crate::virtio::SharedMemoryRegion;
use crate::virtio::VirtioDevice;
use crate::PciAddress;

pub struct VhostUserFrontend {
    device_type: DeviceType,
    worker_thread: Option<WorkerThread<Option<BackendReqHandler>>>,

    backend_client: BackendClient,
    avail_features: u64,
    acked_features: u64,
    protocol_features: VhostUserProtocolFeatures,
    // `backend_req_handler` is only present if the backend supports BACKEND_REQ. `worker_thread`
    // takes ownership of `backend_req_handler` when it starts. The worker thread will always
    // return ownership of the handler when stopped.
    backend_req_handler: Option<BackendReqHandler>,
    // Shared memory region info. The outer `Option` tracks whether the backend has been queried
    // yet; the inner `Option` is the backend's reply (`None` if it has no shared memory region).
    shmem_region: RefCell<Option<Option<SharedMemoryRegion>>>,

    queue_sizes: Vec<u16>,
    cfg: Option<Vec<u8>>,
    expose_shmem_descriptors_with_viommu: bool,
    pci_address: Option<PciAddress>,
}

// Returns the largest power of two that is less than or equal to `val`, or `None` if `val` is 0.
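// For example: `power_of_two_le(4) == Some(4)`, `power_of_two_le(5) == Some(4)`, and
// `power_of_two_le(0) == None`.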
fn power_of_two_le(val: u16) -> Option<u16> {
    if val == 0 {
        None
    } else if val.is_power_of_two() {
        Some(val)
    } else {
        val.checked_next_power_of_two()
            .map(|next_pow_two| next_pow_two / 2)
            // `checked_next_power_of_two()` overflows for values above `1 << 15`; the largest
            // `u16` power of two, `1 << 15`, is the correct result in that case.
            .or(Some(1 << 15))
    }
}

impl VhostUserFrontend {
    /// Create a new VirtioDevice for a vhost-user device frontend.
    ///
    /// # Arguments
    ///
    /// - `device_type`: virtio device type
    /// - `base_features`: base virtio device features (e.g. `VIRTIO_F_VERSION_1`)
    /// - `connection`: connection to the device backend
    /// - `max_queue_size`: maximum number of entries in each queue (default: [`Queue::MAX_SIZE`])
    pub fn new(
        device_type: DeviceType,
        base_features: u64,
        connection: vmm_vhost::SystemStream,
        max_queue_size: Option<u16>,
        pci_address: Option<PciAddress>,
    ) -> Result<VhostUserFrontend> {
        VhostUserFrontend::new_internal(
            connection,
            device_type,
            max_queue_size,
            base_features,
            None, // cfg
            pci_address,
        )
    }

    /// Create a new VirtioDevice for a vhost-user device frontend.
    ///
    /// # Arguments
    ///
    /// - `connection`: connection to the device backend
    /// - `device_type`: virtio device type
    /// - `max_queue_size`: maximum number of entries in each queue (default: [`Queue::MAX_SIZE`])
    /// - `base_features`: base virtio device features (e.g. `VIRTIO_F_VERSION_1`)
    /// - `cfg`: bytes to return for the virtio configuration space (queried from device if not
    ///   specified)
    pub(crate) fn new_internal(
        connection: vmm_vhost::SystemStream,
        device_type: DeviceType,
        max_queue_size: Option<u16>,
        mut base_features: u64,
        cfg: Option<&[u8]>,
        pci_address: Option<PciAddress>,
    ) -> Result<VhostUserFrontend> {
        // Don't allow packed queues even if requested. We don't handle them properly yet at the
        // protocol layer.
        // TODO: b/331466964 - Remove once packed queue support is added to BackendClient.
        if base_features & (1 << virtio_sys::virtio_config::VIRTIO_F_RING_PACKED) != 0 {
            base_features &= !(1 << virtio_sys::virtio_config::VIRTIO_F_RING_PACKED);
            base::warn!(
                "VIRTIO_F_RING_PACKED requested, but not yet supported by vhost-user frontend. \
                 Automatically disabled."
            );
        }

        #[cfg(windows)]
        let backend_pid = connection.target_pid();

        let mut backend_client = BackendClient::from_stream(connection);

        backend_client.set_owner().map_err(Error::SetOwner)?;

        let allow_features = VIRTIO_DEVICE_TYPE_SPECIFIC_FEATURES_MASK
            | base_features
            | 1 << VHOST_USER_F_PROTOCOL_FEATURES;
        let avail_features =
            allow_features & backend_client.get_features().map_err(Error::GetFeatures)?;
        let mut acked_features = 0;

        let mut allow_protocol_features = VhostUserProtocolFeatures::CONFIG
            | VhostUserProtocolFeatures::MQ
            | VhostUserProtocolFeatures::BACKEND_REQ;

        // HACK: the crosvm vhost-user GPU backend supports the non-standard
        // VHOST_USER_PROTOCOL_FEATURE_SHARED_MEMORY_REGIONS. This should either be standardized
        // (and enabled for all device types) or removed.
        let expose_shmem_descriptors_with_viommu = if device_type == DeviceType::Gpu {
            allow_protocol_features |= VhostUserProtocolFeatures::SHARED_MEMORY_REGIONS;
            true
        } else {
            false
        };

        let mut protocol_features = VhostUserProtocolFeatures::empty();
        if avail_features & 1 << VHOST_USER_F_PROTOCOL_FEATURES != 0 {
            // The vhost-user backend supports VHOST_USER_F_PROTOCOL_FEATURES; enable it.
            backend_client
                .set_features(1 << VHOST_USER_F_PROTOCOL_FEATURES)
                .map_err(Error::SetFeatures)?;
            acked_features |= 1 << VHOST_USER_F_PROTOCOL_FEATURES;

            let avail_protocol_features = backend_client
                .get_protocol_features()
                .map_err(Error::GetProtocolFeatures)?;
            protocol_features = allow_protocol_features & avail_protocol_features;
            backend_client
                .set_protocol_features(protocol_features)
                .map_err(Error::SetProtocolFeatures)?;
        }

        // Set up the backend request handler if the `VhostUserProtocolFeatures::BACKEND_REQ`
        // protocol feature was negotiated.
        let backend_req_handler =
            if protocol_features.contains(VhostUserProtocolFeatures::BACKEND_REQ) {
                let (handler, tx_fd) = create_backend_req_handler(
                    BackendReqHandlerImpl::new(),
                    #[cfg(windows)]
                    backend_pid,
                )?;
                backend_client
                    .set_backend_req_fd(&tx_fd)
                    .map_err(Error::SetDeviceRequestChannel)?;
                Some(handler)
            } else {
                None
            };

        // If the device supports VHOST_USER_PROTOCOL_F_MQ, use VHOST_USER_GET_QUEUE_NUM to
        // determine the number of queues supported. Otherwise, use the minimum number of queues
        // required by the spec for this device type.
        let num_queues = if protocol_features.contains(VhostUserProtocolFeatures::MQ) {
            trace!("backend supports VHOST_USER_PROTOCOL_F_MQ");
            let num_queues = backend_client.get_queue_num().map_err(Error::GetQueueNum)?;
            trace!("VHOST_USER_GET_QUEUE_NUM returned {num_queues}");
            num_queues as usize
        } else {
            trace!("backend does not support VHOST_USER_PROTOCOL_F_MQ");
            device_type.min_queues()
        };

        // Clamp the maximum queue size to the largest power of 2 <= max_queue_size.
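        // (e.g. a requested size of 1000 is clamped to 512; `None` falls back to
        // `Queue::MAX_SIZE`).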
        let max_queue_size = max_queue_size
            .and_then(power_of_two_le)
            .unwrap_or(Queue::MAX_SIZE);

        trace!(
            "vhost-user {device_type} frontend with {num_queues} queues x {max_queue_size} entries\
            {}",
            if let Some(pci_address) = pci_address {
                format!(" pci-address {pci_address}")
            } else {
                "".to_string()
            }
        );

        let queue_sizes = vec![max_queue_size; num_queues];

        Ok(VhostUserFrontend {
            device_type,
            worker_thread: None,
            backend_client,
            avail_features,
            acked_features,
            protocol_features,
            backend_req_handler,
            shmem_region: RefCell::new(None),
            queue_sizes,
            cfg: cfg.map(|cfg| cfg.to_vec()),
            expose_shmem_descriptors_with_viommu,
            pci_address,
        })
    }

    fn set_mem_table(&mut self, mem: &GuestMemory) -> Result<()> {
        let regions: Vec<_> = mem
            .regions()
            .map(|region| VhostUserMemoryRegionInfo {
                guest_phys_addr: region.guest_addr.0,
                memory_size: region.size as u64,
                userspace_addr: region.host_addr as u64,
                mmap_offset: region.shm_offset,
                mmap_handle: region.shm.as_raw_descriptor(),
            })
            .collect();

        self.backend_client
            .set_mem_table(regions.as_slice())
            .map_err(Error::SetMemTable)?;

        Ok(())
    }

    /// Activates a vring for the given `queue`.
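    ///
    /// Sends the vring setup messages in the order the code below implements: `SET_VRING_NUM`,
    /// `SET_VRING_ADDR`, `SET_VRING_BASE`, `SET_VRING_CALL`, `SET_VRING_KICK`, and finally
    /// `SET_VRING_ENABLE` (only if `VHOST_USER_F_PROTOCOL_FEATURES` was negotiated).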
    fn activate_vring(
        &mut self,
        mem: &GuestMemory,
        queue_index: usize,
        queue: &Queue,
        irqfd: &Event,
    ) -> Result<()> {
        self.backend_client
            .set_vring_num(queue_index, queue.size())
            .map_err(Error::SetVringNum)?;

        let config_data = VringConfigData {
            queue_size: queue.size(),
            flags: 0u32,
            desc_table_addr: mem
                .get_host_address(queue.desc_table())
                .map_err(Error::GetHostAddress)? as u64,
            used_ring_addr: mem
                .get_host_address(queue.used_ring())
                .map_err(Error::GetHostAddress)? as u64,
            avail_ring_addr: mem
                .get_host_address(queue.avail_ring())
                .map_err(Error::GetHostAddress)? as u64,
            log_addr: None,
        };
        self.backend_client
            .set_vring_addr(queue_index, &config_data)
            .map_err(Error::SetVringAddr)?;

        self.backend_client
            .set_vring_base(queue_index, 0)
            .map_err(Error::SetVringBase)?;

        self.backend_client
            .set_vring_call(queue_index, irqfd)
            .map_err(Error::SetVringCall)?;
        self.backend_client
            .set_vring_kick(queue_index, queue.event())
            .map_err(Error::SetVringKick)?;

        // Per protocol documentation, `VHOST_USER_SET_VRING_ENABLE` should be sent only when
        // `VHOST_USER_F_PROTOCOL_FEATURES` has been negotiated.
        if self.acked_features & 1 << VHOST_USER_F_PROTOCOL_FEATURES != 0 {
            self.backend_client
                .set_vring_enable(queue_index, true)
                .map_err(Error::SetVringEnable)?;
        }

        Ok(())
    }

    /// Helper to start the worker thread that handles interrupts and requests from the device
    /// process.
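    ///
    /// The worker takes ownership of `backend_req_handler` while it runs and returns ownership
    /// when it stops (see [`VhostUserFrontend::reset`]).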
    fn start_worker(&mut self, interrupt: Interrupt, non_msix_evt: Event) {
        assert!(
            self.worker_thread.is_none(),
            "BUG: attempted to start worker twice"
        );

        let label = format!("vhost_user_virtio_{}", self.device_type);

        let mut backend_req_handler = self.backend_req_handler.take();
        if let Some(handler) = &mut backend_req_handler {
            // Give the handler a copy of the interrupt so it can signal the guest on behalf of
            // the backend.
            handler.frontend_mut().set_interrupt(interrupt.clone());
        }

        self.worker_thread = Some(WorkerThread::start(label.clone(), move |kill_evt| {
            let ex = cros_async::Executor::new().expect("failed to create an executor");
            let ex2 = ex.clone();
            ex.run_until(async {
                let mut worker = Worker {
                    kill_evt,
                    non_msix_evt,
                    backend_req_handler,
                };
                if let Err(e) = worker.run(&ex2, interrupt).await {
                    error!("failed to run {} worker: {:#}", label, e);
                }
                worker.backend_req_handler
            })
            .expect("run_until failed")
        }));
    }
}

impl VirtioDevice for VhostUserFrontend {
    fn keep_rds(&self) -> Vec<RawDescriptor> {
        Vec::new()
    }

    fn device_type(&self) -> DeviceType {
        self.device_type
    }

    fn queue_max_sizes(&self) -> &[u16] {
        &self.queue_sizes
    }

    fn features(&self) -> u64 {
        self.avail_features
    }

    fn ack_features(&mut self, features: u64) {
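        // Mask out feature bits the backend never offered, and keep any bits already acked
        // during negotiation (e.g. VHOST_USER_F_PROTOCOL_FEATURES).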
        let features = (features & self.avail_features) | self.acked_features;
        if let Err(e) = self
            .backend_client
            .set_features(features)
            .map_err(Error::SetFeatures)
        {
            error!("failed to enable features 0x{:x}: {}", features, e);
            return;
        }
        self.acked_features = features;
    }

    fn read_config(&self, offset: u64, data: &mut [u8]) {
        if let Some(cfg) = &self.cfg {
            copy_config(data, 0, cfg, offset);
            return;
        }

        let Ok(offset) = offset.try_into() else {
            error!("failed to read config: invalid config offset: {offset}");
            return;
        };
        let Ok(data_len) = data.len().try_into() else {
            error!(
                "failed to read config: invalid config length: {}",
                data.len()
            );
            return;
        };
        let (_, config) = match self.backend_client.get_config(
            offset,
            data_len,
            VhostUserConfigFlags::WRITABLE,
            data,
        ) {
            Ok(x) => x,
            Err(e) => {
                error!("failed to read config: {}", Error::GetConfig(e));
                return;
            }
        };
        data.copy_from_slice(&config);
    }

    fn write_config(&mut self, offset: u64, data: &[u8]) {
        let Ok(offset) = offset.try_into() else {
            error!("failed to write config: invalid config offset: {offset}");
            return;
        };
        if let Err(e) = self
            .backend_client
            .set_config(offset, VhostUserConfigFlags::empty(), data)
            .map_err(Error::SetConfig)
        {
            error!("failed to write config: {}", e);
        }
    }

    fn activate(
        &mut self,
        mem: GuestMemory,
        interrupt: Interrupt,
        queues: BTreeMap<usize, Queue>,
    ) -> anyhow::Result<()> {
        self.set_mem_table(&mem)?;

        let msix_config_opt = interrupt
            .get_msix_config()
            .as_ref()
            .ok_or(Error::MsixConfigUnavailable)?;
        let msix_config = msix_config_opt.lock();

        let non_msix_evt = Event::new().map_err(Error::CreateEvent)?;
        for (&queue_index, queue) in queues.iter() {
            let irqfd = msix_config
                .get_irqfd(queue.vector() as usize)
                .unwrap_or(&non_msix_evt);
            self.activate_vring(&mem, queue_index, queue, irqfd)?;
        }

        drop(msix_config);

        self.start_worker(interrupt, non_msix_evt);
        Ok(())
    }

    fn reset(&mut self) -> anyhow::Result<()> {
        for queue_index in 0..self.queue_sizes.len() {
            if self.acked_features & 1 << VHOST_USER_F_PROTOCOL_FEATURES != 0 {
                self.backend_client
                    .set_vring_enable(queue_index, false)
                    .context("set_vring_enable failed during reset")?;
            }
            let _vring_base = self
                .backend_client
                .get_vring_base(queue_index)
                .context("get_vring_base failed during reset")?;
        }

        if let Some(w) = self.worker_thread.take() {
            self.backend_req_handler = w.stop();
        }

        Ok(())
    }

    fn pci_address(&self) -> Option<PciAddress> {
        self.pci_address
    }

    fn get_shared_memory_region(&self) -> Option<SharedMemoryRegion> {
        if !self
            .protocol_features
            .contains(VhostUserProtocolFeatures::SHARED_MEMORY_REGIONS)
        {
            return None;
        }
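        // Return the cached result of an earlier backend query, if one was made.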
        if let Some(r) = self.shmem_region.borrow().as_ref() {
            return r.clone();
        }
        let regions = match self
            .backend_client
            .get_shared_memory_regions()
            .map_err(Error::ShmemRegions)
        {
            Ok(x) => x,
            Err(e) => {
                error!("failed to get shared memory regions: {}", e);
                return None;
            }
        };
        let region = match regions.len() {
            0 => None,
            1 => Some(SharedMemoryRegion {
                id: regions[0].id,
                length: regions[0].length,
            }),
            n => {
                error!(
                    "failed to get shared memory regions: {}",
                    Error::TooManyShmemRegions(n)
                );
                return None;
            }
        };

        *self.shmem_region.borrow_mut() = Some(region.clone());
        region
    }

    fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) {
        // Log an error and bail out if the backend request handler is not available. This
        // indicates that `VhostUserProtocolFeatures::BACKEND_REQ` was not negotiated.
        let Some(backend_req_handler) = self.backend_req_handler.as_mut() else {
            error!(
                "Error setting shared memory mapper {}",
                Error::ProtocolFeatureNotNegoiated(VhostUserProtocolFeatures::BACKEND_REQ)
            );
            return;
        };

        // The virtio framework will only call this if `get_shared_memory_region` returned a
        // region, so the cached shmem info is guaranteed to be present.
        let shmid = self
            .shmem_region
            .borrow()
            .clone()
            .flatten()
            .expect("missing shmid")
            .id;

        backend_req_handler
            .frontend_mut()
            .set_shared_mapper_state(mapper, shmid);
    }

    fn expose_shmem_descriptors_with_viommu(&self) -> bool {
        self.expose_shmem_descriptors_with_viommu
    }

    fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
        self.backend_client.sleep().map_err(Error::Sleep)?;

        // Vhost-user devices don't return queues on sleep, so return an empty BTreeMap so that
        // VirtioPciDevice can set the sleep state properly.
        Ok(Some(BTreeMap::new()))
    }

    fn virtio_wake(
        &mut self,
        // Vhost-user doesn't need to pass the queue state back to the device process, since it
        // already has it.
        _queues_state: Option<(GuestMemory, Interrupt, BTreeMap<usize, Queue>)>,
    ) -> anyhow::Result<()> {
        self.backend_client.wake().map_err(Error::Wake)?;
        Ok(())
    }

    fn virtio_snapshot(&mut self) -> anyhow::Result<Value> {
        let snapshot_bytes = self.backend_client.snapshot().map_err(Error::Snapshot)?;
        Ok(serde_json::to_value(snapshot_bytes).map_err(Error::SliceToSerdeValue)?)
    }

    fn virtio_restore(&mut self, _data: Value) -> anyhow::Result<()> {
        panic!("virtio_restore should not be called for vhost-user devices.")
    }

    fn is_vhost_user(&self) -> bool {
        true
    }

    fn vhost_user_restore(
        &mut self,
        data: Value,
        queue_configs: &[QueueConfig],
        queue_evts: Option<Vec<Event>>,
        interrupt: Option<Interrupt>,
        mem: GuestMemory,
        msix_config: &Arc<Mutex<MsixConfig>>,
        device_activated: bool,
    ) -> anyhow::Result<()> {
        // Other aspects of the restore operation will depend on the mem table being set.
        self.set_mem_table(&mem)?;

        if device_activated {
            let non_msix_evt = Event::new().context("Failed to create event")?;
            queue_configs
                .iter()
                .enumerate()
                .filter(|(_, q)| q.ready())
                .try_for_each(|(queue_index, queue)| {
                    let msix_lock = msix_config.lock();
                    let irqfd = msix_lock
                        .get_irqfd(queue.vector() as usize)
                        .unwrap_or(&non_msix_evt);

                    self.backend_client
                        .set_vring_call(queue_index, irqfd)
                        .map_err(Error::SetVringCall)
                        .context("Failed to restore irqfd")?;

                    Ok::<(), anyhow::Error>(())
                })?;

            self.start_worker(
                interrupt.expect(
                    "Interrupt doesn't exist. This shouldn't happen since the device is \
                     activated.",
                ),
                non_msix_evt,
            );
        }

        let data_bytes: Vec<u8> = serde_json::from_value(data).map_err(Error::SerdeValueToSlice)?;
        self.backend_client
            .restore(data_bytes.as_slice(), queue_evts)
            .map_err(Error::Restore)?;

        Ok(())
    }
}
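
// A minimal sanity check for `power_of_two_le`, added as an illustrative sketch of the clamping
// behavior used when computing `max_queue_size`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn power_of_two_le_clamps_to_largest_power_of_two() {
        assert_eq!(power_of_two_le(0), None);
        assert_eq!(power_of_two_le(1), Some(1));
        assert_eq!(power_of_two_le(4), Some(4));
        assert_eq!(power_of_two_le(5), Some(4));
        // Values above 1 << 15 clamp to the largest u16 power of two.
        assert_eq!(power_of_two_le(u16::MAX), Some(1 << 15));
    }
}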