// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

use std::fs::File;
use std::mem;
use std::path::Path;

use anyhow::anyhow;
use base::AsRawDescriptor;
use base::Event;
use base::RawDescriptor;
use base::INVALID_DESCRIPTOR;
use zerocopy::AsBytes;
use zerocopy::FromBytes;

use crate::backend::VhostUserMemoryRegionInfo;
use crate::backend::VringConfigData;
use crate::into_single_file;
use crate::message::*;
use crate::Connection;
use crate::Error as VhostUserError;
use crate::FrontendReq;
use crate::Result as VhostUserResult;
use crate::Result;
use crate::SystemStream;

/// Client for a vhost-user device. The API is a thin abstraction over the vhost-user protocol.
pub struct BackendClient {
    connection: Connection<FrontendReq>,
    // Cached virtio features from the backend.
    virtio_features: u64,
    // Cached acked virtio features from the driver.
    acked_virtio_features: u64,
    // Cached vhost-user protocol features.
    acked_protocol_features: u64,
}

impl BackendClient {
    /// Create a new instance from a Unix stream socket.
    pub fn from_stream(sock: SystemStream) -> Self {
        Self::new(Connection::from(sock))
    }

    /// Create a new instance.
    fn new(connection: Connection<FrontendReq>) -> Self {
        BackendClient {
            connection,
            virtio_features: 0,
            acked_virtio_features: 0,
            acked_protocol_features: 0,
        }
    }

    /// Create a new instance.
    ///
    /// Will retry as the backend may not be ready to accept the connection.
    ///
    /// # Arguments
    /// * `path` - path of Unix domain socket listener to connect to
    pub fn connect<P: AsRef<Path>>(path: P) -> Result<Self> {
        let mut retry_count = 5;
        let connection = loop {
            match Connection::connect(&path) {
                Ok(connection) => break Ok(connection),
                Err(e) => match &e {
                    VhostUserError::SocketConnect(why) => {
                        if why.kind() == std::io::ErrorKind::ConnectionRefused && retry_count > 0 {
                            std::thread::sleep(std::time::Duration::from_millis(100));
                            retry_count -= 1;
                            continue;
                        } else {
                            break Err(e);
                        }
                    }
                    _ => break Err(e),
                },
            }
        }?;

        Ok(Self::new(connection))
    }

    /// Get a bitmask of supported virtio/vhost features.
    pub fn get_features(&mut self) -> Result<u64> {
        let hdr = self.send_request_header(FrontendReq::GET_FEATURES, None)?;
        let val = self.recv_reply::<VhostUserU64>(&hdr)?;
        self.virtio_features = val.value;
        Ok(self.virtio_features)
    }

    /// Inform the vhost subsystem which features to enable.
    /// This should be a subset of supported features from get_features().
    pub fn set_features(&mut self, features: u64) -> Result<()> {
        let val = VhostUserU64::new(features);
        let hdr = self.send_request_with_body(FrontendReq::SET_FEATURES, &val, None)?;
        self.acked_virtio_features = features & self.virtio_features;
        self.wait_for_ack(&hdr)
    }

    /// Set the current process as the owner of the vhost backend.
    /// This must be run before any other vhost commands.
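    ///
    /// # Example
    ///
    /// A minimal sketch of the initial handshake, assuming `backend_client` is an already
    /// connected `BackendClient` and `FEATURES_WANTED` is a feature mask chosen by the caller
    /// (both names are illustrative and not part of this crate):
    ///
    /// ```ignore
    /// backend_client.set_owner()?;
    /// let supported = backend_client.get_features()?;
    /// backend_client.set_features(supported & FEATURES_WANTED)?;
    /// ```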
    pub fn set_owner(&self) -> Result<()> {
        let hdr = self.send_request_header(FrontendReq::SET_OWNER, None)?;
        self.wait_for_ack(&hdr)
    }

    /// Used to be sent to request disabling all rings.
    /// This is no longer used.
    pub fn reset_owner(&self) -> Result<()> {
        let hdr = self.send_request_header(FrontendReq::RESET_OWNER, None)?;
        self.wait_for_ack(&hdr)
    }

    /// Set the memory map regions on the backend so it can translate the vring
    /// addresses. The ancillary data carries an array of file descriptors, one per region.
    pub fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> {
        if regions.is_empty() || regions.len() > MAX_ATTACHED_FD_ENTRIES {
            return Err(VhostUserError::InvalidParam);
        }

        let mut ctx = VhostUserMemoryContext::new();
        for region in regions.iter() {
            if region.memory_size == 0 || region.mmap_handle == INVALID_DESCRIPTOR {
                return Err(VhostUserError::InvalidParam);
            }

            let reg = VhostUserMemoryRegion {
                guest_phys_addr: region.guest_phys_addr,
                memory_size: region.memory_size,
                user_addr: region.userspace_addr,
                mmap_offset: region.mmap_offset,
            };
            ctx.append(&reg, region.mmap_handle);
        }

        let body = VhostUserMemory::new(ctx.regions.len() as u32);
        let hdr = self.send_request_with_payload(
            FrontendReq::SET_MEM_TABLE,
            &body,
            ctx.regions.as_bytes(),
            Some(ctx.fds.as_slice()),
        )?;
        self.wait_for_ack(&hdr)
    }

    /// Set base address for page modification logging.
    pub fn set_log_base(&self, base: u64, fd: Option<RawDescriptor>) -> Result<()> {
        let val = VhostUserU64::new(base);

        let should_have_fd =
            self.acked_protocol_features & VhostUserProtocolFeatures::LOG_SHMFD.bits() != 0;
        if should_have_fd != fd.is_some() {
            return Err(VhostUserError::InvalidParam);
        }

        let _ = self.send_request_with_body(
            FrontendReq::SET_LOG_BASE,
            &val,
            fd.as_ref().map(std::slice::from_ref),
        )?;

        Ok(())
    }

    /// Specify an event file descriptor to signal on log write.
    pub fn set_log_fd(&self, fd: RawDescriptor) -> Result<()> {
        let fds = [fd];
        let hdr = self.send_request_header(FrontendReq::SET_LOG_FD, Some(&fds))?;
        self.wait_for_ack(&hdr)
    }

    /// Set the number of descriptors in the vring.
    pub fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {
        let val = VhostUserVringState::new(queue_index as u32, num.into());
        let hdr = self.send_request_with_body(FrontendReq::SET_VRING_NUM, &val, None)?;
        self.wait_for_ack(&hdr)
    }

    /// Set the addresses for a given vring.
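    ///
    /// # Example
    ///
    /// A hedged sketch of configuring a single queue after feature negotiation, assuming
    /// `backend_client` is a negotiated `BackendClient`, `config_data` is a `VringConfigData`
    /// describing the ring addresses, and `kick_evt`/`call_evt` are `base::Event`s shared with
    /// the device (all of these names and the queue parameters are illustrative):
    ///
    /// ```ignore
    /// backend_client.set_vring_num(0, 256)?;
    /// backend_client.set_vring_base(0, 0)?;
    /// backend_client.set_vring_addr(0, &config_data)?;
    /// backend_client.set_vring_kick(0, &kick_evt)?;
    /// backend_client.set_vring_call(0, &call_evt)?;
    /// backend_client.set_vring_enable(0, true)?;
    /// ```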
    pub fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {
        if config_data.flags & !(VhostUserVringAddrFlags::all().bits()) != 0 {
            return Err(VhostUserError::InvalidParam);
        }

        let val = VhostUserVringAddr::from_config_data(queue_index as u32, config_data);
        let hdr = self.send_request_with_body(FrontendReq::SET_VRING_ADDR, &val, None)?;
        self.wait_for_ack(&hdr)
    }

    /// Set the first index to look for available descriptors.
    // TODO: b/331466964 - Arguments and message format are wrong for packed queues.
    pub fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> {
        let val = VhostUserVringState::new(queue_index as u32, base.into());
        let hdr = self.send_request_with_body(FrontendReq::SET_VRING_BASE, &val, None)?;
        self.wait_for_ack(&hdr)
    }

    /// Get the available vring base offset.
    // TODO: b/331466964 - Return type is wrong for packed queues.
    pub fn get_vring_base(&self, queue_index: usize) -> Result<u32> {
        let req = VhostUserVringState::new(queue_index as u32, 0);
        let hdr = self.send_request_with_body(FrontendReq::GET_VRING_BASE, &req, None)?;
        let reply = self.recv_reply::<VhostUserVringState>(&hdr)?;
        Ok(reply.num)
    }

    /// Set the event to trigger when buffers have been used by the host.
    ///
    /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
    /// is set when there is no file descriptor in the ancillary data. This signals that polling
    /// will be used instead of waiting for the call.
    pub fn set_vring_call(&self, queue_index: usize, event: &Event) -> Result<()> {
        let hdr = self.send_fd_for_vring(
            FrontendReq::SET_VRING_CALL,
            queue_index,
            event.as_raw_descriptor(),
        )?;
        self.wait_for_ack(&hdr)
    }

    /// Set the event that will be signaled by the guest when buffers are available for the host to
    /// process.
    ///
    /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
    /// is set when there is no file descriptor in the ancillary data. This signals that polling
    /// should be used instead of waiting for a kick.
    pub fn set_vring_kick(&self, queue_index: usize, event: &Event) -> Result<()> {
        let hdr = self.send_fd_for_vring(
            FrontendReq::SET_VRING_KICK,
            queue_index,
            event.as_raw_descriptor(),
        )?;
        self.wait_for_ack(&hdr)
    }

    /// Set the event that will be signaled by the guest when an error happens.
    ///
    /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
    /// is set when there is no file descriptor in the ancillary data.
    pub fn set_vring_err(&self, queue_index: usize, event: &Event) -> Result<()> {
        let hdr = self.send_fd_for_vring(
            FrontendReq::SET_VRING_ERR,
            queue_index,
            event.as_raw_descriptor(),
        )?;
        self.wait_for_ack(&hdr)
    }

    /// Put the device to sleep.
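    ///
    /// # Example
    ///
    /// A hedged sketch of a suspend/snapshot/resume flow, assuming `backend_client` is an already
    /// negotiated `BackendClient`; how the returned state bytes are persisted is up to the caller:
    ///
    /// ```ignore
    /// backend_client.sleep()?;
    /// let state: Vec<u8> = backend_client.snapshot()?;
    /// // ... persist `state` ...
    /// backend_client.wake()?;
    /// ```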
    pub fn sleep(&self) -> Result<()> {
        let hdr = self.send_request_header(FrontendReq::SLEEP, None)?;
        let reply = self.recv_reply::<VhostUserSuccess>(&hdr)?;
        if !reply.success() {
            Err(VhostUserError::SleepError(anyhow!(
                "Device process responded with a failure on SLEEP."
            )))
        } else {
            Ok(())
        }
    }

    /// Wake the device up.
    pub fn wake(&self) -> Result<()> {
        let hdr = self.send_request_header(FrontendReq::WAKE, None)?;
        let reply = self.recv_reply::<VhostUserSuccess>(&hdr)?;
        if !reply.success() {
            Err(VhostUserError::WakeError(anyhow!(
                "Device process responded with a failure on WAKE."
            )))
        } else {
            Ok(())
        }
    }

    /// Snapshot the device and receive the serialized state of the device.
    pub fn snapshot(&self) -> Result<Vec<u8>> {
        let hdr = self.send_request_header(FrontendReq::SNAPSHOT, None)?;
        let (success_msg, buf_reply, _) = self.recv_reply_with_payload::<VhostUserSuccess>(&hdr)?;
        if !success_msg.success() {
            Err(VhostUserError::SnapshotError(anyhow!(
                "Device process responded with a failure on SNAPSHOT."
            )))
        } else {
            Ok(buf_reply)
        }
    }

    /// Restore the device.
    pub fn restore(&mut self, data_bytes: &[u8], queue_evts: Option<Vec<Event>>) -> Result<()> {
        let body = VhostUserEmptyMsg;

        let queue_evt_fds: Option<Vec<RawDescriptor>> = queue_evts.as_ref().map(|queue_evts| {
            queue_evts
                .iter()
                .map(|queue_evt| queue_evt.as_raw_descriptor())
                .collect()
        });

        let hdr = self.send_request_with_payload(
            FrontendReq::RESTORE,
            &body,
            data_bytes,
            queue_evt_fds.as_deref(),
        )?;
        let reply = self.recv_reply::<VhostUserSuccess>(&hdr)?;
        if !reply.success() {
            Err(VhostUserError::RestoreError(anyhow!(
                "Device process responded with a failure on RESTORE."
            )))
        } else {
            Ok(())
        }
    }

    /// Get the protocol feature bitmask from the underlying vhost implementation.
    pub fn get_protocol_features(&self) -> Result<VhostUserProtocolFeatures> {
        if self.virtio_features & 1 << VHOST_USER_F_PROTOCOL_FEATURES == 0 {
            return Err(VhostUserError::InvalidOperation);
        }
        let hdr = self.send_request_header(FrontendReq::GET_PROTOCOL_FEATURES, None)?;
        let val = self.recv_reply::<VhostUserU64>(&hdr)?;
        // Should we support forward compatibility?
        // If so, just mask out unrecognized flags instead of returning errors.
        match VhostUserProtocolFeatures::from_bits(val.value) {
            Some(val) => Ok(val),
            None => Err(VhostUserError::InvalidMessage),
        }
    }

    /// Enable protocol features in the underlying vhost implementation.
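    ///
    /// # Example
    ///
    /// A minimal sketch of protocol feature negotiation, assuming `VHOST_USER_F_PROTOCOL_FEATURES`
    /// was already acked via `set_features()` and that the caller only wants the illustrative
    /// subset below:
    ///
    /// ```ignore
    /// let supported = backend_client.get_protocol_features()?;
    /// let wanted = VhostUserProtocolFeatures::MQ | VhostUserProtocolFeatures::CONFIG;
    /// backend_client.set_protocol_features(supported & wanted)?;
    /// ```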
    pub fn set_protocol_features(&mut self, features: VhostUserProtocolFeatures) -> Result<()> {
        if self.virtio_features & 1 << VHOST_USER_F_PROTOCOL_FEATURES == 0 {
            return Err(VhostUserError::InvalidOperation);
        }
        if features.contains(VhostUserProtocolFeatures::SHARED_MEMORY_REGIONS)
            && !features.contains(VhostUserProtocolFeatures::BACKEND_REQ)
        {
            return Err(VhostUserError::FeatureMismatch);
        }
        let val = VhostUserU64::new(features.bits());
        let hdr = self.send_request_with_body(FrontendReq::SET_PROTOCOL_FEATURES, &val, None)?;
        // Don't wait for ACK here because the protocol feature negotiation process hasn't been
        // completed yet.
        self.acked_protocol_features = features.bits();
        self.wait_for_ack(&hdr)
    }

    /// Query how many queues the backend supports.
    pub fn get_queue_num(&self) -> Result<u64> {
        if !self.is_feature_mq_available() {
            return Err(VhostUserError::InvalidOperation);
        }

        let hdr = self.send_request_header(FrontendReq::GET_QUEUE_NUM, None)?;
        let val = self.recv_reply::<VhostUserU64>(&hdr)?;
        if val.value > VHOST_USER_MAX_VRINGS {
            return Err(VhostUserError::InvalidMessage);
        }
        Ok(val.value)
    }

    /// Signal the backend to enable or disable the corresponding vring.
    ///
    /// The backend must not pass data to/from the ring until it has been enabled by
    /// VHOST_USER_SET_VRING_ENABLE with parameter 1, or after it has been disabled by
    /// VHOST_USER_SET_VRING_ENABLE with parameter 0.
    pub fn set_vring_enable(&self, queue_index: usize, enable: bool) -> Result<()> {
        // set_vring_enable() is supported only when PROTOCOL_FEATURES has been enabled.
        if self.acked_virtio_features & 1 << VHOST_USER_F_PROTOCOL_FEATURES == 0 {
            return Err(VhostUserError::InvalidOperation);
        }

        let val = VhostUserVringState::new(queue_index as u32, enable.into());
        let hdr = self.send_request_with_body(FrontendReq::SET_VRING_ENABLE, &val, None)?;
        self.wait_for_ack(&hdr)
    }

    /// Fetch the contents of the virtio device configuration space.
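    ///
    /// # Example
    ///
    /// A hedged sketch of reading the first 8 bytes of the device config space, assuming
    /// `VhostUserProtocolFeatures::CONFIG` has been negotiated and the device answers with a
    /// payload of the same size (the offset, size, and flags below are illustrative):
    ///
    /// ```ignore
    /// let buf = [0u8; 8];
    /// let (_cfg, payload) =
    ///     backend_client.get_config(0, 8, VhostUserConfigFlags::empty(), &buf)?;
    /// ```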
    pub fn get_config(
        &self,
        offset: u32,
        size: u32,
        flags: VhostUserConfigFlags,
        buf: &[u8],
    ) -> Result<(VhostUserConfig, VhostUserConfigPayload)> {
        let body = VhostUserConfig::new(offset, size, flags);
        if !body.is_valid() {
            return Err(VhostUserError::InvalidParam);
        }

        // Depends on VhostUserProtocolFeatures::CONFIG.
        if self.acked_protocol_features & VhostUserProtocolFeatures::CONFIG.bits() == 0 {
            return Err(VhostUserError::InvalidOperation);
        }

        // The vhost-user spec states that:
        // "Request payload: virtio device config space"
        // "Reply payload: virtio device config space"
        let hdr = self.send_request_with_payload(FrontendReq::GET_CONFIG, &body, buf, None)?;
        let (body_reply, buf_reply, rfds) = self.recv_reply_with_payload::<VhostUserConfig>(&hdr)?;
        if !rfds.is_empty() {
            return Err(VhostUserError::InvalidMessage);
        } else if body_reply.size == 0 {
            return Err(VhostUserError::BackendInternalError);
        } else if body_reply.size != body.size
            || body_reply.size as usize != buf.len()
            || body_reply.offset != body.offset
        {
            return Err(VhostUserError::InvalidMessage);
        }

        Ok((body_reply, buf_reply))
    }

    /// Change the virtio device configuration space. It can also be used for live migration on
    /// the destination host to set read-only configuration space fields.
    pub fn set_config(&self, offset: u32, flags: VhostUserConfigFlags, buf: &[u8]) -> Result<()> {
        let body = VhostUserConfig::new(
            offset,
            buf.len()
                .try_into()
                .map_err(VhostUserError::InvalidCastToInt)?,
            flags,
        );
        if !body.is_valid() {
            return Err(VhostUserError::InvalidParam);
        }

        // Depends on VhostUserProtocolFeatures::CONFIG.
        if self.acked_protocol_features & VhostUserProtocolFeatures::CONFIG.bits() == 0 {
            return Err(VhostUserError::InvalidOperation);
        }

        let hdr = self.send_request_with_payload(FrontendReq::SET_CONFIG, &body, buf, None)?;
        self.wait_for_ack(&hdr)
    }

    /// Set up the backend communication channel.
    pub fn set_backend_req_fd(&self, fd: &dyn AsRawDescriptor) -> Result<()> {
        if self.acked_protocol_features & VhostUserProtocolFeatures::BACKEND_REQ.bits() == 0 {
            return Err(VhostUserError::InvalidOperation);
        }
        let fds = [fd.as_raw_descriptor()];
        let hdr = self.send_request_header(FrontendReq::SET_BACKEND_REQ_FD, Some(&fds))?;
        self.wait_for_ack(&hdr)
    }

    /// Retrieve the shared buffer for inflight I/O tracking.
    pub fn get_inflight_fd(
        &self,
        inflight: &VhostUserInflight,
    ) -> Result<(VhostUserInflight, File)> {
        if self.acked_protocol_features & VhostUserProtocolFeatures::INFLIGHT_SHMFD.bits() == 0 {
            return Err(VhostUserError::InvalidOperation);
        }

        let hdr = self.send_request_with_body(FrontendReq::GET_INFLIGHT_FD, inflight, None)?;
        let (inflight, files) = self.recv_reply_with_files::<VhostUserInflight>(&hdr)?;

        match into_single_file(files) {
            Some(file) => Ok((inflight, file)),
            None => Err(VhostUserError::IncorrectFds),
        }
    }

    /// Set the shared buffer for inflight I/O tracking.
    pub fn set_inflight_fd(&self, inflight: &VhostUserInflight, fd: RawDescriptor) -> Result<()> {
        if self.acked_protocol_features & VhostUserProtocolFeatures::INFLIGHT_SHMFD.bits() == 0 {
            return Err(VhostUserError::InvalidOperation);
        }

        if inflight.mmap_size == 0
            || inflight.num_queues == 0
            || inflight.queue_size == 0
            || fd == INVALID_DESCRIPTOR
        {
            return Err(VhostUserError::InvalidParam);
        }

        let hdr =
            self.send_request_with_body(FrontendReq::SET_INFLIGHT_FD, inflight, Some(&[fd]))?;
        self.wait_for_ack(&hdr)
    }

    /// Query the maximum number of memory slots supported by the backend.
    pub fn get_max_mem_slots(&self) -> Result<u64> {
        if self.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() == 0
        {
            return Err(VhostUserError::InvalidOperation);
        }

        let hdr = self.send_request_header(FrontendReq::GET_MAX_MEM_SLOTS, None)?;
        let val = self.recv_reply::<VhostUserU64>(&hdr)?;

        Ok(val.value)
    }

    /// Add a new guest memory mapping for vhost to use.
    pub fn add_mem_region(&self, region: &VhostUserMemoryRegionInfo) -> Result<()> {
        if self.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() == 0
        {
            return Err(VhostUserError::InvalidOperation);
        }

        if region.memory_size == 0 || region.mmap_handle == INVALID_DESCRIPTOR {
            return Err(VhostUserError::InvalidParam);
        }

        let body = VhostUserSingleMemoryRegion::new(
            region.guest_phys_addr,
            region.memory_size,
            region.userspace_addr,
            region.mmap_offset,
        );
        let fds = [region.mmap_handle];
        let hdr = self.send_request_with_body(FrontendReq::ADD_MEM_REG, &body, Some(&fds))?;
        self.wait_for_ack(&hdr)
    }

    /// Remove a guest memory mapping from vhost.
    pub fn remove_mem_region(&self, region: &VhostUserMemoryRegionInfo) -> Result<()> {
        if self.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() == 0
        {
            return Err(VhostUserError::InvalidOperation);
        }
        if region.memory_size == 0 {
            return Err(VhostUserError::InvalidParam);
        }

        let body = VhostUserSingleMemoryRegion::new(
            region.guest_phys_addr,
            region.memory_size,
            region.userspace_addr,
            region.mmap_offset,
        );
        let hdr = self.send_request_with_body(FrontendReq::REM_MEM_REG, &body, None)?;
        self.wait_for_ack(&hdr)
    }

    /// Gets the shared memory regions used by the device.
    pub fn get_shared_memory_regions(&self) -> Result<Vec<VhostSharedMemoryRegion>> {
        let hdr = self.send_request_header(FrontendReq::GET_SHARED_MEMORY_REGIONS, None)?;
        let (body_reply, buf_reply, rfds) = self.recv_reply_with_payload::<VhostUserU64>(&hdr)?;
        let struct_size = mem::size_of::<VhostSharedMemoryRegion>();
        if !rfds.is_empty() || buf_reply.len() != body_reply.value as usize * struct_size {
            return Err(VhostUserError::InvalidMessage);
        }
        let mut regions = Vec::new();
        let mut offset = 0;
        for _ in 0..body_reply.value {
            regions.push(
                // Can't fail because the input is the correct size.
                VhostSharedMemoryRegion::read_from(&buf_reply[offset..(offset + struct_size)])
                    .unwrap(),
            );
            offset += struct_size;
        }
        Ok(regions)
    }

    fn send_request_header(
        &self,
        code: FrontendReq,
        fds: Option<&[RawDescriptor]>,
    ) -> VhostUserResult<VhostUserMsgHeader<FrontendReq>> {
        let hdr = self.new_request_header(code, 0);
        self.connection.send_header_only_message(&hdr, fds)?;
        Ok(hdr)
    }

    fn send_request_with_body<T: Sized + AsBytes>(
        &self,
        code: FrontendReq,
        msg: &T,
        fds: Option<&[RawDescriptor]>,
    ) -> VhostUserResult<VhostUserMsgHeader<FrontendReq>> {
        let hdr = self.new_request_header(code, mem::size_of::<T>() as u32);
        self.connection.send_message(&hdr, msg, fds)?;
        Ok(hdr)
    }

    fn send_request_with_payload<T: Sized + AsBytes>(
        &self,
        code: FrontendReq,
        msg: &T,
        payload: &[u8],
        fds: Option<&[RawDescriptor]>,
    ) -> VhostUserResult<VhostUserMsgHeader<FrontendReq>> {
        if let Some(fd_arr) = fds {
            if fd_arr.len() > MAX_ATTACHED_FD_ENTRIES {
                return Err(VhostUserError::InvalidParam);
            }
        }
        let len = mem::size_of::<T>()
            .checked_add(payload.len())
            .ok_or(VhostUserError::OversizedMsg)?;
        let hdr = self.new_request_header(
            code,
            len.try_into().map_err(VhostUserError::InvalidCastToInt)?,
        );
        self.connection
            .send_message_with_payload(&hdr, msg, payload, fds)?;
        Ok(hdr)
    }

    fn send_fd_for_vring(
        &self,
        code: FrontendReq,
        queue_index: usize,
        fd: RawDescriptor,
    ) -> VhostUserResult<VhostUserMsgHeader<FrontendReq>> {
        // Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag.
        // This flag is set when there is no file descriptor in the ancillary data. This signals
        // that polling will be used instead of waiting for the call.
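        // Since this helper always attaches a file descriptor, only the vring index (bits 0-7)
        // is encoded here and the invalid FD flag (bit 8) is left clear.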
        let msg = VhostUserU64::new(queue_index as u64);
        let hdr = self.new_request_header(code, mem::size_of::<VhostUserU64>() as u32);
        self.connection.send_message(&hdr, &msg, Some(&[fd]))?;
        Ok(hdr)
    }

    fn recv_reply<T: Sized + FromBytes + AsBytes + Default + VhostUserMsgValidator>(
        &self,
        hdr: &VhostUserMsgHeader<FrontendReq>,
    ) -> VhostUserResult<T> {
        if hdr.is_reply() {
            return Err(VhostUserError::InvalidParam);
        }
        let (reply, body, rfds) = self.connection.recv_message::<T>()?;
        if !reply.is_reply_for(hdr) || !rfds.is_empty() || !body.is_valid() {
            return Err(VhostUserError::InvalidMessage);
        }
        Ok(body)
    }

    fn recv_reply_with_files<T: Sized + AsBytes + FromBytes + Default + VhostUserMsgValidator>(
        &self,
        hdr: &VhostUserMsgHeader<FrontendReq>,
    ) -> VhostUserResult<(T, Vec<File>)> {
        if hdr.is_reply() {
            return Err(VhostUserError::InvalidParam);
        }

        let (reply, body, files) = self.connection.recv_message::<T>()?;
        if !reply.is_reply_for(hdr) || files.is_empty() || !body.is_valid() {
            return Err(VhostUserError::InvalidMessage);
        }
        Ok((body, files))
    }

    fn recv_reply_with_payload<T: Sized + AsBytes + FromBytes + Default + VhostUserMsgValidator>(
        &self,
        hdr: &VhostUserMsgHeader<FrontendReq>,
    ) -> VhostUserResult<(T, Vec<u8>, Vec<File>)> {
        if hdr.is_reply() {
            return Err(VhostUserError::InvalidParam);
        }

        let (reply, body, buf, files) = self.connection.recv_message_with_payload::<T>()?;
        if !reply.is_reply_for(hdr) || !files.is_empty() || !body.is_valid() {
            return Err(VhostUserError::InvalidMessage);
        }

        Ok((body, buf, files))
    }

    fn wait_for_ack(&self, hdr: &VhostUserMsgHeader<FrontendReq>) -> VhostUserResult<()> {
        if self.acked_protocol_features & VhostUserProtocolFeatures::REPLY_ACK.bits() == 0
            || !hdr.is_need_reply()
        {
            return Ok(());
        }

        let (reply, body, rfds) = self.connection.recv_message::<VhostUserU64>()?;
        if !reply.is_reply_for(hdr) || !rfds.is_empty() || !body.is_valid() {
            return Err(VhostUserError::InvalidMessage);
        }
        if body.value != 0 {
            return Err(VhostUserError::BackendInternalError);
        }
        Ok(())
    }

    fn is_feature_mq_available(&self) -> bool {
        self.acked_protocol_features & VhostUserProtocolFeatures::MQ.bits() != 0
    }

    #[inline]
    fn new_request_header(
        &self,
        request: FrontendReq,
        size: u32,
    ) -> VhostUserMsgHeader<FrontendReq> {
        VhostUserMsgHeader::new(request, 0x1, size)
    }
}

// TODO(b/221882601): likely need pairs of RDs and/or SharedMemory to represent mmaps on Windows.
/// Context object to pass guest memory configuration to BackendClient::set_mem_table().
struct VhostUserMemoryContext {
    regions: VhostUserMemoryPayload,
    fds: Vec<RawDescriptor>,
}

impl VhostUserMemoryContext {
    /// Create a context object.
    pub fn new() -> Self {
        VhostUserMemoryContext {
            regions: VhostUserMemoryPayload::new(),
            fds: Vec::new(),
        }
    }

    /// Append a user memory region and corresponding RawDescriptor into the context object.
    pub fn append(&mut self, region: &VhostUserMemoryRegion, fd: RawDescriptor) {
        self.regions.push(*region);
        self.fds.push(fd);
    }
}

#[cfg(test)]
mod tests {
    use base::INVALID_DESCRIPTOR;
    use tempfile::tempfile;

    use super::*;
    use crate::tests::create_pair;

    const BUFFER_SIZE: usize = 0x1001;

    #[test]
    fn create_backend_client() {
        let (backend_client, peer) = create_pair();

        assert!(backend_client.connection.as_raw_descriptor() != INVALID_DESCRIPTOR);
        // Send two messages continuously
        backend_client.set_owner().unwrap();
        backend_client.reset_owner().unwrap();

        let (hdr, rfds) = peer.recv_header().unwrap();
        assert_eq!(hdr.get_code(), Ok(FrontendReq::SET_OWNER));
        assert_eq!(hdr.get_size(), 0);
        assert_eq!(hdr.get_version(), 0x1);
        assert!(rfds.is_empty());

        let (hdr, rfds) = peer.recv_header().unwrap();
        assert_eq!(hdr.get_code(), Ok(FrontendReq::RESET_OWNER));
        assert_eq!(hdr.get_size(), 0);
        assert_eq!(hdr.get_version(), 0x1);
        assert!(rfds.is_empty());
    }

    #[test]
    fn test_features() {
        let (mut backend_client, peer) = create_pair();

        backend_client.set_owner().unwrap();
        let (hdr, rfds) = peer.recv_header().unwrap();
        assert_eq!(hdr.get_code(), Ok(FrontendReq::SET_OWNER));
        assert_eq!(hdr.get_size(), 0);
        assert_eq!(hdr.get_version(), 0x1);
        assert!(rfds.is_empty());

        let hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(0x15);
        peer.send_message(&hdr, &msg, None).unwrap();
        let features = backend_client.get_features().unwrap();
        assert_eq!(features, 0x15u64);
        let (_hdr, rfds) = peer.recv_header().unwrap();
        assert!(rfds.is_empty());

        let hdr = VhostUserMsgHeader::new(FrontendReq::SET_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(0x15);
        peer.send_message(&hdr, &msg, None).unwrap();
        backend_client.set_features(0x15).unwrap();
        let (_hdr, msg, rfds) = peer.recv_message::<VhostUserU64>().unwrap();
        assert!(rfds.is_empty());
        let val = msg.value;
        assert_eq!(val, 0x15);

        let hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0x4, 8);
        let msg = 0x15u32;
        peer.send_message(&hdr, &msg, None).unwrap();
        assert!(backend_client.get_features().is_err());
    }

    #[test]
    fn test_protocol_features() {
        let (mut backend_client, peer) = create_pair();

        backend_client.set_owner().unwrap();
        let (hdr, rfds) = peer.recv_header().unwrap();
        assert_eq!(hdr.get_code(), Ok(FrontendReq::SET_OWNER));
        assert!(rfds.is_empty());

        assert!(backend_client.get_protocol_features().is_err());
        assert!(backend_client
            .set_protocol_features(VhostUserProtocolFeatures::all())
            .is_err());

        let vfeatures = 0x15 | 1 << VHOST_USER_F_PROTOCOL_FEATURES;
        let hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(vfeatures);
        peer.send_message(&hdr, &msg, None).unwrap();
        let features = backend_client.get_features().unwrap();
        assert_eq!(features, vfeatures);
        let (_hdr, rfds) = peer.recv_header().unwrap();
        assert!(rfds.is_empty());

        backend_client.set_features(vfeatures).unwrap();
        let (_hdr, msg, rfds) = peer.recv_message::<VhostUserU64>().unwrap();
        assert!(rfds.is_empty());
        let val = msg.value;
        assert_eq!(val, vfeatures);

        let pfeatures = VhostUserProtocolFeatures::all();
        let hdr = VhostUserMsgHeader::new(FrontendReq::GET_PROTOCOL_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(pfeatures.bits());
        peer.send_message(&hdr, &msg, None).unwrap();
        let features = backend_client.get_protocol_features().unwrap();
        assert_eq!(features, pfeatures);
        let (_hdr, rfds) = peer.recv_header().unwrap();
        assert!(rfds.is_empty());

        backend_client.set_protocol_features(pfeatures).unwrap();
        let (_hdr, msg, rfds) = peer.recv_message::<VhostUserU64>().unwrap();
        assert!(rfds.is_empty());
        let val = msg.value;
        assert_eq!(val, pfeatures.bits());

        let hdr = VhostUserMsgHeader::new(FrontendReq::SET_PROTOCOL_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(pfeatures.bits());
        peer.send_message(&hdr, &msg, None).unwrap();
        assert!(backend_client.get_protocol_features().is_err());
    }

    #[test]
    fn test_backend_client_set_config_negative() {
        let (mut backend_client, _peer) = create_pair();
        let buf = vec![0x0; BUFFER_SIZE];

        backend_client
            .set_config(0x100, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .unwrap_err();

        backend_client.virtio_features = 0xffff_ffff;
        backend_client.acked_virtio_features = 0xffff_ffff;
        backend_client.acked_protocol_features = 0xffff_ffff;

        backend_client
            .set_config(0, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .unwrap();
        backend_client
            .set_config(
                VHOST_USER_CONFIG_SIZE,
                VhostUserConfigFlags::WRITABLE,
                &buf[0..4],
            )
            .unwrap_err();
        backend_client
            .set_config(0x1000, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .unwrap_err();
        backend_client
            .set_config(
                0x100,
                VhostUserConfigFlags::from_bits_retain(0xffff_ffff),
                &buf[0..4],
            )
            .unwrap_err();
        backend_client
            .set_config(VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE, &buf)
            .unwrap_err();
        backend_client
            .set_config(VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE, &[])
            .unwrap_err();
    }

    fn create_pair2() -> (BackendClient, Connection<FrontendReq>) {
        let (mut backend_client, peer) = create_pair();

        backend_client.virtio_features = 0xffff_ffff;
        backend_client.acked_virtio_features = 0xffff_ffff;
        backend_client.acked_protocol_features = 0xffff_ffff;

        (backend_client, peer)
    }

    #[test]
    fn test_backend_client_get_config_negative0() {
        let (backend_client, peer) = create_pair2();
        let buf = vec![0x0; BUFFER_SIZE];

        let mut hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
        let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(backend_client
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        hdr.set_code(FrontendReq::GET_FEATURES);
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(backend_client
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
        hdr.set_code(FrontendReq::GET_CONFIG);
    }

    #[test]
    fn test_backend_client_get_config_negative1() {
        let (backend_client, peer) = create_pair2();
        let buf = vec![0x0; BUFFER_SIZE];

        let mut hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
        let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(backend_client
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        hdr.set_reply(false);
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(backend_client
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    #[test]
    fn test_backend_client_get_config_negative2() {
        let (backend_client, peer) = create_pair2();
        let buf = vec![0x0; BUFFER_SIZE];

        let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
        let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(backend_client
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());
    }

    #[test]
    fn test_backend_client_get_config_negative3() {
        let (backend_client, peer) = create_pair2();
        let buf = vec![0x0; BUFFER_SIZE];

        let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
        let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(backend_client
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        msg.offset = 0;
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(backend_client
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    #[test]
    fn test_backend_client_get_config_negative4() {
        let (backend_client, peer) = create_pair2();
        let buf = vec![0x0; BUFFER_SIZE];

        let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
        let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(backend_client
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        msg.offset = 0x101;
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(backend_client
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    #[test]
    fn test_backend_client_get_config_negative5() {
        let (backend_client, peer) = create_pair2();
        let buf = vec![0x0; BUFFER_SIZE];

        let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
        let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(backend_client
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        msg.offset = BUFFER_SIZE as u32;
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(backend_client
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    #[test]
    fn test_backend_client_get_config_negative6() {
        let (backend_client, peer) = create_pair2();
        let buf = vec![0x0; BUFFER_SIZE];

        let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
        let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(backend_client
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        msg.size = 6;
        peer.send_message_with_payload(&hdr, &msg, &buf[0..6], None)
            .unwrap();
        assert!(backend_client
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    #[test]
    fn test_set_mem_table_failure() {
        let (backend_client, _peer) = create_pair2();

        // set_mem_table() with 0 regions is invalid
        backend_client.set_mem_table(&[]).unwrap_err();

        // set_mem_table() with more than MAX_ATTACHED_FD_ENTRIES is invalid
        let files: Vec<File> = (0..MAX_ATTACHED_FD_ENTRIES + 1)
            .map(|_| tempfile().unwrap())
            .collect();
        let tables: Vec<VhostUserMemoryRegionInfo> = files
            .iter()
            .map(|f| VhostUserMemoryRegionInfo {
                guest_phys_addr: 0,
                memory_size: 0x100000,
                userspace_addr: 0x800000,
                mmap_offset: 0,
                mmap_handle: f.as_raw_descriptor(),
            })
            .collect();
        backend_client.set_mem_table(&tables).unwrap_err();
    }
}