1 // Copyright 2018 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 mod qcow_raw_file;
6 mod refcount;
7 mod vec_cache;
8
9 use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
10 use data_model::{VolatileMemory, VolatileSlice};
11 use libc::{EINVAL, ENOSPC, ENOTSUP};
12 use remain::sorted;
13 use sys_util::{
14 error, FileReadWriteVolatile, FileSetLen, FileSync, PunchHole, SeekHole, WriteZeroes,
15 };
16
17 use std::cmp::min;
18 use std::fmt::{self, Display};
19 use std::fs::File;
20 use std::io::{self, Read, Seek, SeekFrom, Write};
21 use std::mem::size_of;
22 use std::os::unix::io::{AsRawFd, RawFd};
23
24 use crate::qcow_raw_file::QcowRawFile;
25 use crate::refcount::RefCount;
26 use crate::vec_cache::{CacheMap, Cacheable, VecCache};
27
28 #[sorted]
29 #[derive(Debug)]
30 pub enum Error {
31 BackingFilesNotSupported,
32 CompressedBlocksNotSupported,
33 EvictingCache(io::Error),
34 FileTooBig(u64),
35 GettingFileSize(io::Error),
36 GettingRefcount(refcount::Error),
37 InvalidClusterIndex,
38 InvalidClusterSize,
39 InvalidIndex,
40 InvalidL1TableOffset,
41 InvalidMagic,
42 InvalidOffset(u64),
43 InvalidRefcountTableOffset,
44 InvalidRefcountTableSize(u64),
45 NoFreeClusters,
46 NoRefcountClusters,
47 NotEnoughSpaceForRefcounts,
48 OpeningFile(io::Error),
49 ReadingData(io::Error),
50 ReadingHeader(io::Error),
51 ReadingPointers(io::Error),
52 ReadingRefCountBlock(refcount::Error),
53 ReadingRefCounts(io::Error),
54 RebuildingRefCounts(io::Error),
55 SeekingFile(io::Error),
56 SettingFileSize(io::Error),
57 SettingRefcountRefcount(io::Error),
58 SizeTooSmallForNumberOfClusters,
59 TooManyL1Entries(u64),
60 TooManyRefcounts(u64),
61 UnsupportedRefcountOrder,
62 UnsupportedVersion(u32),
63 WritingData(io::Error),
64 WritingHeader(io::Error),
65 }
66
67 pub type Result<T> = std::result::Result<T, Error>;
68
69 impl Display for Error {
70 #[remain::check]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
72 use self::Error::*;
73
74 #[sorted]
75 match self {
76 BackingFilesNotSupported => write!(f, "backing files not supported"),
77 CompressedBlocksNotSupported => write!(f, "compressed blocks not supported"),
78 EvictingCache(e) => write!(f, "failed to evict cache: {}", e),
79 FileTooBig(size) => write!(f, "file larger than max of 1TB: {}", size),
80 GettingFileSize(e) => write!(f, "failed to get file size: {}", e),
81 GettingRefcount(e) => write!(f, "failed to get refcount: {}", e),
82 InvalidClusterIndex => write!(f, "invalid cluster index"),
83 InvalidClusterSize => write!(f, "invalid cluster size"),
84 InvalidIndex => write!(f, "invalid index"),
85 InvalidL1TableOffset => write!(f, "invalid L1 table offset"),
86 InvalidMagic => write!(f, "invalid magic"),
87 InvalidOffset(_) => write!(f, "invalid offset"),
88 InvalidRefcountTableOffset => write!(f, "invalid refcount table offset"),
89 InvalidRefcountTableSize(size) => write!(f, "invalid refcount table size: {}", size),
90 NoFreeClusters => write!(f, "no free clusters"),
91 NoRefcountClusters => write!(f, "no refcount clusters"),
92 NotEnoughSpaceForRefcounts => write!(f, "not enough space for refcounts"),
93 OpeningFile(e) => write!(f, "failed to open file: {}", e),
94 ReadingData(e) => write!(f, "failed to read data: {}", e),
95 ReadingHeader(e) => write!(f, "failed to read header: {}", e),
96 ReadingPointers(e) => write!(f, "failed to read pointers: {}", e),
97 ReadingRefCountBlock(e) => write!(f, "failed to read ref count block: {}", e),
98 ReadingRefCounts(e) => write!(f, "failed to read ref counts: {}", e),
99 RebuildingRefCounts(e) => write!(f, "failed to rebuild ref counts: {}", e),
100 SeekingFile(e) => write!(f, "failed to seek file: {}", e),
101 SettingFileSize(e) => write!(f, "failed to set file size: {}", e),
102 SettingRefcountRefcount(e) => write!(f, "failed to set refcount refcount: {}", e),
103 SizeTooSmallForNumberOfClusters => write!(f, "size too small for number of clusters"),
104 TooManyL1Entries(count) => write!(f, "l1 entry table too large: {}", count),
105 TooManyRefcounts(count) => write!(f, "ref count table too large: {}", count),
106 UnsupportedRefcountOrder => write!(f, "unsupported refcount order"),
107 UnsupportedVersion(v) => write!(f, "unsupported version: {}", v),
108 WritingData(e) => write!(f, "failed to write data: {}", e),
109 WritingHeader(e) => write!(f, "failed to write header: {}", e),
110 }
111 }
112 }
113
114 pub enum ImageType {
115 Raw,
116 Qcow2,
117 }
118
119 // QCOW magic constant that starts the header.
120 const QCOW_MAGIC: u32 = 0x5146_49fb;
121 // Default to a cluster size of 2^DEFAULT_CLUSTER_BITS
122 const DEFAULT_CLUSTER_BITS: u32 = 16;
// Limit clusters to reasonable sizes. Choose the same limits as qemu. Making the clusters smaller
// increases the amount of bookkeeping overhead.
125 const MIN_CLUSTER_BITS: u32 = 9;
126 const MAX_CLUSTER_BITS: u32 = 21;
// The L1 and RefCount tables are kept in RAM; only handle files that require less than 35M entries.
// This easily covers 1 TB files. When support for bigger files is needed, the assumption that these
// tables fit in RAM needs to be revisited.
130 const MAX_RAM_POINTER_TABLE_SIZE: u64 = 35_000_000;
// Only 2-byte refcounts are supported: each refcount is 2^refcount_order bits wide.
132 const DEFAULT_REFCOUNT_ORDER: u32 = 4;
133
134 const V3_BARE_HEADER_SIZE: u32 = 104;
135
136 // bits 0-8 and 56-63 are reserved.
137 const L1_TABLE_OFFSET_MASK: u64 = 0x00ff_ffff_ffff_fe00;
138 const L2_TABLE_OFFSET_MASK: u64 = 0x00ff_ffff_ffff_fe00;
139 // Flags
140 const COMPRESSED_FLAG: u64 = 1 << 62;
141 const CLUSTER_USED_FLAG: u64 = 1 << 63;
142 const COMPATIBLE_FEATURES_LAZY_REFCOUNTS: u64 = 1 << 0;
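// For example, an L2 entry of 0x8000_0000_0001_0000 has CLUSTER_USED_FLAG set and, after masking
// with L2_TABLE_OFFSET_MASK, points at host offset 0x1_0000.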
143
144 /// Contains the information from the header of a qcow file.
145 #[derive(Copy, Clone, Debug)]
146 pub struct QcowHeader {
147 pub magic: u32,
148 pub version: u32,
149
150 pub backing_file_offset: u64,
151 pub backing_file_size: u32,
152
153 pub cluster_bits: u32,
154 pub size: u64,
155 pub crypt_method: u32,
156
157 pub l1_size: u32,
158 pub l1_table_offset: u64,
159
160 pub refcount_table_offset: u64,
161 pub refcount_table_clusters: u32,
162
163 pub nb_snapshots: u32,
164 pub snapshots_offset: u64,
165
166 // v3 entries
167 pub incompatible_features: u64,
168 pub compatible_features: u64,
169 pub autoclear_features: u64,
170 pub refcount_order: u32,
171 pub header_size: u32,
172 }
173
174 impl QcowHeader {
175 /// Creates a QcowHeader from a reference to a file.
pub fn new(f: &mut File) -> Result<QcowHeader> {
177 f.seek(SeekFrom::Start(0)).map_err(Error::ReadingHeader)?;
178 let magic = f.read_u32::<BigEndian>().map_err(Error::ReadingHeader)?;
179 if magic != QCOW_MAGIC {
180 return Err(Error::InvalidMagic);
181 }
182
183 // Reads the next u32 from the file.
184 fn read_u32_from_file(f: &mut File) -> Result<u32> {
185 f.read_u32::<BigEndian>().map_err(Error::ReadingHeader)
186 }
187
188 // Reads the next u64 from the file.
189 fn read_u64_from_file(f: &mut File) -> Result<u64> {
190 f.read_u64::<BigEndian>().map_err(Error::ReadingHeader)
191 }
192
193 Ok(QcowHeader {
194 magic,
195 version: read_u32_from_file(f)?,
196 backing_file_offset: read_u64_from_file(f)?,
197 backing_file_size: read_u32_from_file(f)?,
198 cluster_bits: read_u32_from_file(f)?,
199 size: read_u64_from_file(f)?,
200 crypt_method: read_u32_from_file(f)?,
201 l1_size: read_u32_from_file(f)?,
202 l1_table_offset: read_u64_from_file(f)?,
203 refcount_table_offset: read_u64_from_file(f)?,
204 refcount_table_clusters: read_u32_from_file(f)?,
205 nb_snapshots: read_u32_from_file(f)?,
206 snapshots_offset: read_u64_from_file(f)?,
207 incompatible_features: read_u64_from_file(f)?,
208 compatible_features: read_u64_from_file(f)?,
209 autoclear_features: read_u64_from_file(f)?,
210 refcount_order: read_u32_from_file(f)?,
211 header_size: read_u32_from_file(f)?,
212 })
213 }
214
215 /// Create a header for the given `size`.
pub fn create_for_size(size: u64) -> QcowHeader {
217 let cluster_bits: u32 = DEFAULT_CLUSTER_BITS;
218 let cluster_size: u32 = 0x01 << cluster_bits;
219 // L2 blocks are always one cluster long. They contain cluster_size/sizeof(u64) addresses.
220 let l2_size: u32 = cluster_size / size_of::<u64>() as u32;
221 let num_clusters: u32 = div_round_up_u64(size, u64::from(cluster_size)) as u32;
222 let num_l2_clusters: u32 = div_round_up_u32(num_clusters, l2_size);
223 let l1_clusters: u32 = div_round_up_u32(num_l2_clusters, cluster_size);
224 let header_clusters = div_round_up_u32(size_of::<QcowHeader>() as u32, cluster_size);
225 QcowHeader {
226 magic: QCOW_MAGIC,
227 version: 3,
228 backing_file_offset: 0,
229 backing_file_size: 0,
230 cluster_bits: DEFAULT_CLUSTER_BITS,
231 size,
232 crypt_method: 0,
233 l1_size: num_l2_clusters,
234 l1_table_offset: u64::from(cluster_size),
235 // The refcount table is after l1 + header.
236 refcount_table_offset: u64::from(cluster_size * (l1_clusters + 1)),
237 refcount_table_clusters: {
// Pre-allocate enough clusters for the entire refcount table as it must be
// contiguous in the file. Allocate enough space to refcount all clusters, including
// the refcount clusters.
241 let max_refcount_clusters = max_refcount_clusters(
242 DEFAULT_REFCOUNT_ORDER,
243 cluster_size,
244 num_clusters + l1_clusters + num_l2_clusters + header_clusters,
245 ) as u32;
246 // The refcount table needs to store the offset of each refcount cluster.
247 div_round_up_u32(
248 max_refcount_clusters * size_of::<u64>() as u32,
249 cluster_size,
250 )
251 },
252 nb_snapshots: 0,
253 snapshots_offset: 0,
254 incompatible_features: 0,
255 compatible_features: 0,
256 autoclear_features: 0,
257 refcount_order: DEFAULT_REFCOUNT_ORDER,
258 header_size: V3_BARE_HEADER_SIZE,
259 }
260 }
261
262 /// Write the header to `file`.
pub fn write_to<F: Write + Seek>(&self, file: &mut F) -> Result<()> {
264 // Writes the next u32 to the file.
265 fn write_u32_to_file<F: Write>(f: &mut F, value: u32) -> Result<()> {
266 f.write_u32::<BigEndian>(value)
267 .map_err(Error::WritingHeader)
268 }
269
270 // Writes the next u64 to the file.
271 fn write_u64_to_file<F: Write>(f: &mut F, value: u64) -> Result<()> {
272 f.write_u64::<BigEndian>(value)
273 .map_err(Error::WritingHeader)
274 }
275
276 write_u32_to_file(file, self.magic)?;
277 write_u32_to_file(file, self.version)?;
278 write_u64_to_file(file, self.backing_file_offset)?;
279 write_u32_to_file(file, self.backing_file_size)?;
280 write_u32_to_file(file, self.cluster_bits)?;
281 write_u64_to_file(file, self.size)?;
282 write_u32_to_file(file, self.crypt_method)?;
283 write_u32_to_file(file, self.l1_size)?;
284 write_u64_to_file(file, self.l1_table_offset)?;
285 write_u64_to_file(file, self.refcount_table_offset)?;
286 write_u32_to_file(file, self.refcount_table_clusters)?;
287 write_u32_to_file(file, self.nb_snapshots)?;
288 write_u64_to_file(file, self.snapshots_offset)?;
289 write_u64_to_file(file, self.incompatible_features)?;
290 write_u64_to_file(file, self.compatible_features)?;
291 write_u64_to_file(file, self.autoclear_features)?;
292 write_u32_to_file(file, self.refcount_order)?;
293 write_u32_to_file(file, self.header_size)?;
294
// Set the file length by seeking and writing a zero to the last byte. This avoids requiring
// a `File` for the `file` argument; anything that implements `Write + Seek` works. Extending
// the file this way leaves the L1 and refcount table clusters zero-filled.
298 let cluster_size = 0x01u64 << self.cluster_bits;
299 let refcount_blocks_size = u64::from(self.refcount_table_clusters) * cluster_size;
300 file.seek(SeekFrom::Start(
301 self.refcount_table_offset + refcount_blocks_size - 2,
302 ))
303 .map_err(Error::WritingHeader)?;
file.write_all(&[0u8]).map_err(Error::WritingHeader)?;
305
306 Ok(())
307 }
308 }
309
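// A worked example, assuming the defaults (refcount_order = 4, i.e. 2-byte refcounts, and 64 KiB
// clusters): refcounting roughly the 16384 clusters of a 1 GiB disk needs
// div_round_up_u64(16384 * 2, 65536) = 1 refcount cluster, plus div_round_up_u64(1 * 2, 65536) = 1
// cluster to refcount the refcount cluster itself, for a total of 2.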
fn max_refcount_clusters(refcount_order: u32, cluster_size: u32, num_clusters: u32) -> u64 {
311 // Use u64 as the product of the u32 inputs can overflow.
312 let refcount_bytes = (0x01 << refcount_order as u64) / 8;
313 let for_data = div_round_up_u64(num_clusters as u64 * refcount_bytes, cluster_size as u64);
314 let for_refcounts = div_round_up_u64(for_data * refcount_bytes, cluster_size as u64);
315 for_data + for_refcounts
316 }
317
318 /// Represents a qcow2 file. This is a sparse file format maintained by the qemu project.
319 /// Full documentation of the format can be found in the qemu repository.
320 ///
321 /// # Example
322 ///
323 /// ```
324 /// # use std::io::{Read, Seek, SeekFrom};
325 /// # use qcow::{self, QcowFile};
326 /// # fn test(file: std::fs::File) -> std::io::Result<()> {
327 /// let mut q = QcowFile::from(file).expect("Can't open qcow file");
328 /// let mut buf = [0u8; 12];
329 /// q.seek(SeekFrom::Start(10 as u64))?;
330 /// q.read(&mut buf[..])?;
331 /// # Ok(())
332 /// # }
333 /// ```
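///
/// Writing goes through the same `std::io` traits (a minimal sketch; the offset and data here
/// are arbitrary):
///
/// ```
/// # use std::io::{Seek, SeekFrom, Write};
/// # use qcow::QcowFile;
/// # fn test(file: std::fs::File) -> std::io::Result<()> {
/// let mut q = QcowFile::from(file).expect("Can't open qcow file");
/// q.seek(SeekFrom::Start(4096))?;
/// q.write_all(&[0xaa_u8; 512])?;
/// # Ok(())
/// # }
/// ```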
334 #[derive(Debug)]
335 pub struct QcowFile {
336 raw_file: QcowRawFile,
337 header: QcowHeader,
338 l1_table: VecCache<u64>,
339 l2_entries: u64,
340 l2_cache: CacheMap<VecCache<u64>>,
341 refcounts: RefCount,
342 current_offset: u64,
343 unref_clusters: Vec<u64>, // List of freshly unreferenced clusters.
// List of unreferenced clusters available to be used. Unreferenced clusters become available
// once the removal of references to them has been synced to disk.
346 avail_clusters: Vec<u64>,
347 //TODO(dgreid) Add support for backing files. - backing_file: Option<Box<QcowFile<T>>>,
348 }
349
350 impl QcowFile {
351 /// Creates a QcowFile from `file`. File must be a valid qcow2 image.
pub fn from(mut file: File) -> Result<QcowFile> {
353 let header = QcowHeader::new(&mut file)?;
354
355 // Only v3 files are supported.
356 if header.version != 3 {
357 return Err(Error::UnsupportedVersion(header.version));
358 }
359
360 let cluster_bits: u32 = header.cluster_bits;
361 if cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS {
362 return Err(Error::InvalidClusterSize);
363 }
364 let cluster_size = 0x01u64 << cluster_bits;
365
366 // No current support for backing files.
367 if header.backing_file_offset != 0 {
368 return Err(Error::BackingFilesNotSupported);
369 }
370
371 // Only support two byte refcounts.
372 let refcount_bits: u64 = 0x01u64
373 .checked_shl(header.refcount_order)
374 .ok_or(Error::UnsupportedRefcountOrder)?;
375 if refcount_bits != 16 {
376 return Err(Error::UnsupportedRefcountOrder);
377 }
378 let refcount_bytes = (refcount_bits + 7) / 8;
379
380 // Need at least one refcount cluster
381 if header.refcount_table_clusters == 0 {
382 return Err(Error::NoRefcountClusters);
383 }
384 offset_is_cluster_boundary(header.backing_file_offset, header.cluster_bits)?;
385 offset_is_cluster_boundary(header.l1_table_offset, header.cluster_bits)?;
386 offset_is_cluster_boundary(header.refcount_table_offset, header.cluster_bits)?;
387 offset_is_cluster_boundary(header.snapshots_offset, header.cluster_bits)?;
388
389 // The first cluster should always have a non-zero refcount, so if it is 0,
390 // this is an old file with broken refcounts, which requires a rebuild.
391 let mut refcount_rebuild_required = true;
392 file.seek(SeekFrom::Start(header.refcount_table_offset))
393 .map_err(Error::SeekingFile)?;
394 let first_refblock_addr = file.read_u64::<BigEndian>().map_err(Error::ReadingHeader)?;
395 if first_refblock_addr != 0 {
396 file.seek(SeekFrom::Start(first_refblock_addr))
397 .map_err(Error::SeekingFile)?;
398 let first_cluster_refcount =
399 file.read_u16::<BigEndian>().map_err(Error::ReadingHeader)?;
400 if first_cluster_refcount != 0 {
401 refcount_rebuild_required = false;
402 }
403 }
404
405 if (header.compatible_features & COMPATIBLE_FEATURES_LAZY_REFCOUNTS) != 0 {
406 refcount_rebuild_required = true;
407 }
408
409 let mut raw_file =
410 QcowRawFile::from(file, cluster_size).ok_or(Error::InvalidClusterSize)?;
411 if refcount_rebuild_required {
412 QcowFile::rebuild_refcounts(&mut raw_file, header)?;
413 }
414
415 let l2_size = cluster_size / size_of::<u64>() as u64;
416 let num_clusters = div_round_up_u64(header.size, cluster_size);
417 let num_l2_clusters = div_round_up_u64(num_clusters, l2_size);
418 let l1_clusters = div_round_up_u64(num_l2_clusters, cluster_size);
419 let header_clusters = div_round_up_u64(size_of::<QcowHeader>() as u64, cluster_size);
420 if num_l2_clusters > MAX_RAM_POINTER_TABLE_SIZE {
421 return Err(Error::TooManyL1Entries(num_l2_clusters));
422 }
423 let l1_table = VecCache::from_vec(
424 raw_file
425 .read_pointer_table(
426 header.l1_table_offset,
427 num_l2_clusters,
428 Some(L1_TABLE_OFFSET_MASK),
429 )
430 .map_err(Error::ReadingHeader)?,
431 );
432
433 let num_clusters = div_round_up_u64(header.size, cluster_size);
434 let refcount_clusters = max_refcount_clusters(
435 header.refcount_order,
436 cluster_size as u32,
437 (num_clusters + l1_clusters + num_l2_clusters + header_clusters) as u32,
438 );
439 if l1_clusters + refcount_clusters > MAX_RAM_POINTER_TABLE_SIZE {
440 return Err(Error::TooManyRefcounts(refcount_clusters));
441 }
442 let refcount_block_entries = cluster_size / refcount_bytes;
443 let refcounts = RefCount::new(
444 &mut raw_file,
445 header.refcount_table_offset,
446 refcount_clusters,
447 refcount_block_entries,
448 cluster_size,
449 )
450 .map_err(Error::ReadingRefCounts)?;
451
452 let l2_entries = cluster_size / size_of::<u64>() as u64;
453
454 let mut qcow = QcowFile {
455 raw_file,
456 header,
457 l1_table,
458 l2_entries,
459 l2_cache: CacheMap::new(100),
460 refcounts,
461 current_offset: 0,
462 unref_clusters: Vec::new(),
463 avail_clusters: Vec::new(),
464 };
465
466 // Check that the L1 and refcount tables fit in a 64bit address space.
467 qcow.header
468 .l1_table_offset
469 .checked_add(qcow.l1_address_offset(qcow.virtual_size()))
470 .ok_or(Error::InvalidL1TableOffset)?;
471 qcow.header
472 .refcount_table_offset
473 .checked_add(u64::from(qcow.header.refcount_table_clusters) * cluster_size)
474 .ok_or(Error::InvalidRefcountTableOffset)?;
475
476 qcow.find_avail_clusters()?;
477
478 Ok(qcow)
479 }
480
/// Creates a new QcowFile in the given `file`, with space for `virtual_size` bytes of guest data.
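///
/// A minimal usage sketch (the 1 MiB virtual size is arbitrary):
///
/// ```
/// # use qcow::QcowFile;
/// # fn test(file: std::fs::File) -> qcow::Result<()> {
/// let _q = QcowFile::new(file, 1024 * 1024)?;
/// # Ok(())
/// # }
/// ```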
pub fn new(mut file: File, virtual_size: u64) -> Result<QcowFile> {
483 let header = QcowHeader::create_for_size(virtual_size);
484 file.seek(SeekFrom::Start(0)).map_err(Error::SeekingFile)?;
485 header.write_to(&mut file)?;
486
487 let mut qcow = Self::from(file)?;
488
489 // Set the refcount for each refcount table cluster.
490 let cluster_size = 0x01u64 << qcow.header.cluster_bits;
491 let refcount_table_base = qcow.header.refcount_table_offset as u64;
492 let end_cluster_addr =
493 refcount_table_base + u64::from(qcow.header.refcount_table_clusters) * cluster_size;
494
495 let mut cluster_addr = 0;
496 while cluster_addr < end_cluster_addr {
497 let mut unref_clusters = qcow
498 .set_cluster_refcount(cluster_addr, 1)
499 .map_err(Error::SettingRefcountRefcount)?;
500 qcow.unref_clusters.append(&mut unref_clusters);
501 cluster_addr += cluster_size;
502 }
503
504 Ok(qcow)
505 }
506
507 /// Returns the `QcowHeader` for this file.
pub fn header(&self) -> &QcowHeader {
509 &self.header
510 }
511
512 /// Returns the L1 lookup table for this file. This is only useful for debugging.
pub fn l1_table(&self) -> &[u64] {
514 &self.l1_table.get_values()
515 }
516
/// Returns an L2 table of cluster addresses. This is only useful for debugging.
pub fn l2_table(&mut self, l1_index: usize) -> Result<Option<&[u64]>> {
519 let l2_addr_disk = *self.l1_table.get(l1_index).ok_or(Error::InvalidIndex)?;
520
521 if l2_addr_disk == 0 {
522 // Reading from an unallocated cluster will return zeros.
523 return Ok(None);
524 }
525
526 if !self.l2_cache.contains_key(&l1_index) {
527 // Not in the cache.
528 let table = VecCache::from_vec(
529 Self::read_l2_cluster(&mut self.raw_file, l2_addr_disk)
530 .map_err(Error::ReadingPointers)?,
531 );
532 let l1_table = &self.l1_table;
533 let raw_file = &mut self.raw_file;
534 self.l2_cache
535 .insert(l1_index, table, |index, evicted| {
536 raw_file.write_pointer_table(
537 l1_table[index],
538 evicted.get_values(),
539 CLUSTER_USED_FLAG,
540 )
541 })
542 .map_err(Error::EvictingCache)?;
543 }
544
545 // The index must exist as it was just inserted if it didn't already.
546 Ok(Some(self.l2_cache.get(&l1_index).unwrap().get_values()))
547 }
548
549 /// Returns the refcount table for this file. This is only useful for debugging.
pub fn ref_table(&self) -> &[u64] {
551 &self.refcounts.ref_table()
552 }
553
554 /// Returns the `index`th refcount block from the file.
pub fn refcount_block(&mut self, index: usize) -> Result<Option<&[u16]>> {
556 self.refcounts
557 .refcount_block(&mut self.raw_file, index)
558 .map_err(Error::ReadingRefCountBlock)
559 }
560
561 /// Returns the first cluster in the file with a 0 refcount. Used for testing.
pub fn first_zero_refcount(&mut self) -> Result<Option<u64>> {
563 let file_size = self
564 .raw_file
565 .file_mut()
566 .metadata()
567 .map_err(Error::GettingFileSize)?
568 .len();
569 let cluster_size = 0x01u64 << self.header.cluster_bits;
570
571 let mut cluster_addr = 0;
572 while cluster_addr < file_size {
573 let cluster_refcount = self
574 .refcounts
575 .get_cluster_refcount(&mut self.raw_file, cluster_addr)
576 .map_err(Error::GettingRefcount)?;
577 if cluster_refcount == 0 {
578 return Ok(Some(cluster_addr));
579 }
580 cluster_addr += cluster_size;
581 }
582 Ok(None)
583 }
584
fn find_avail_clusters(&mut self) -> Result<()> {
586 let cluster_size = self.raw_file.cluster_size();
587
588 let file_size = self
589 .raw_file
590 .file_mut()
591 .metadata()
592 .map_err(Error::GettingFileSize)?
593 .len();
594
595 for i in (0..file_size).step_by(cluster_size as usize) {
596 let refcount = self
597 .refcounts
598 .get_cluster_refcount(&mut self.raw_file, i)
599 .map_err(Error::GettingRefcount)?;
600 if refcount == 0 {
601 self.avail_clusters.push(i);
602 }
603 }
604
605 Ok(())
606 }
607
608 /// Rebuild the reference count tables.
fn rebuild_refcounts(raw_file: &mut QcowRawFile, header: QcowHeader) -> Result<()> {
610 fn add_ref(refcounts: &mut [u16], cluster_size: u64, cluster_address: u64) -> Result<()> {
611 let idx = (cluster_address / cluster_size) as usize;
612 if idx >= refcounts.len() {
613 return Err(Error::InvalidClusterIndex);
614 }
615 refcounts[idx] += 1;
616 Ok(())
617 }
618
619 // Add a reference to the first cluster (header plus extensions).
620 fn set_header_refcount(refcounts: &mut [u16], cluster_size: u64) -> Result<()> {
621 add_ref(refcounts, cluster_size, 0)
622 }
623
624 // Add references to the L1 table clusters.
625 fn set_l1_refcounts(
626 refcounts: &mut [u16],
627 header: QcowHeader,
628 cluster_size: u64,
629 ) -> Result<()> {
630 let l1_clusters = div_round_up_u64(header.l1_size as u64, cluster_size);
631 let l1_table_offset = header.l1_table_offset;
632 for i in 0..l1_clusters {
633 add_ref(refcounts, cluster_size, l1_table_offset + i * cluster_size)?;
634 }
635 Ok(())
636 }
637
638 // Traverse the L1 and L2 tables to find all reachable data clusters.
639 fn set_data_refcounts(
640 refcounts: &mut [u16],
641 header: QcowHeader,
642 cluster_size: u64,
643 raw_file: &mut QcowRawFile,
644 ) -> Result<()> {
645 let l1_table = raw_file
646 .read_pointer_table(
647 header.l1_table_offset,
648 header.l1_size as u64,
649 Some(L1_TABLE_OFFSET_MASK),
650 )
651 .map_err(Error::ReadingPointers)?;
652 for l1_index in 0..header.l1_size as usize {
653 let l2_addr_disk = *l1_table.get(l1_index).ok_or(Error::InvalidIndex)?;
654 if l2_addr_disk != 0 {
655 // Add a reference to the L2 table cluster itself.
656 add_ref(refcounts, cluster_size, l2_addr_disk)?;
657
658 // Read the L2 table and find all referenced data clusters.
659 let l2_table = raw_file
660 .read_pointer_table(
661 l2_addr_disk,
662 cluster_size / size_of::<u64>() as u64,
663 Some(L2_TABLE_OFFSET_MASK),
664 )
665 .map_err(Error::ReadingPointers)?;
666 for data_cluster_addr in l2_table {
667 if data_cluster_addr != 0 {
668 add_ref(refcounts, cluster_size, data_cluster_addr)?;
669 }
670 }
671 }
672 }
673
674 Ok(())
675 }
676
677 // Add references to the top-level refcount table clusters.
678 fn set_refcount_table_refcounts(
679 refcounts: &mut [u16],
680 header: QcowHeader,
681 cluster_size: u64,
682 ) -> Result<()> {
683 let refcount_table_offset = header.refcount_table_offset;
684 for i in 0..header.refcount_table_clusters as u64 {
685 add_ref(
686 refcounts,
687 cluster_size,
688 refcount_table_offset + i * cluster_size,
689 )?;
690 }
691 Ok(())
692 }
693
694 // Allocate clusters for refblocks.
695 // This needs to be done last so that we have the correct refcounts for all other
696 // clusters.
697 fn alloc_refblocks(
698 refcounts: &mut [u16],
699 cluster_size: u64,
700 refblock_clusters: u64,
701 pointers_per_cluster: u64,
702 ) -> Result<Vec<u64>> {
703 let refcount_table_entries = div_round_up_u64(refblock_clusters, pointers_per_cluster);
704 let mut ref_table = vec![0; refcount_table_entries as usize];
705 let mut first_free_cluster: u64 = 0;
706 for refblock_addr in &mut ref_table {
707 while refcounts[first_free_cluster as usize] != 0 {
708 first_free_cluster += 1;
709 if first_free_cluster >= refcounts.len() as u64 {
710 return Err(Error::NotEnoughSpaceForRefcounts);
711 }
712 }
713
714 *refblock_addr = first_free_cluster * cluster_size;
715 add_ref(refcounts, cluster_size, *refblock_addr)?;
716
717 first_free_cluster += 1;
718 }
719
720 Ok(ref_table)
721 }
722
723 // Write the updated reference count blocks and reftable.
724 fn write_refblocks(
725 refcounts: &[u16],
726 mut header: QcowHeader,
727 ref_table: &[u64],
728 raw_file: &mut QcowRawFile,
729 refcount_block_entries: u64,
730 ) -> Result<()> {
731 // Rewrite the header with lazy refcounts enabled while we are rebuilding the tables.
732 header.compatible_features |= COMPATIBLE_FEATURES_LAZY_REFCOUNTS;
733 raw_file
734 .file_mut()
735 .seek(SeekFrom::Start(0))
736 .map_err(Error::SeekingFile)?;
737 header.write_to(raw_file.file_mut())?;
738
739 for (i, refblock_addr) in ref_table.iter().enumerate() {
740 // Write a block of refcounts to the location indicated by refblock_addr.
741 let refblock_start = i * (refcount_block_entries as usize);
742 let refblock_end = min(
743 refcounts.len(),
744 refblock_start + refcount_block_entries as usize,
745 );
746 let refblock = &refcounts[refblock_start..refblock_end];
747 raw_file
748 .write_refcount_block(*refblock_addr, refblock)
749 .map_err(Error::WritingHeader)?;
750
751 // If this is the last (partial) cluster, pad it out to a full refblock cluster.
752 if refblock.len() < refcount_block_entries as usize {
753 let refblock_padding =
754 vec![0u16; refcount_block_entries as usize - refblock.len()];
755 raw_file
756 .write_refcount_block(
757 *refblock_addr + refblock.len() as u64 * 2,
758 &refblock_padding,
759 )
760 .map_err(Error::WritingHeader)?;
761 }
762 }
763
764 // Rewrite the top-level refcount table.
765 raw_file
766 .write_pointer_table(header.refcount_table_offset, &ref_table, 0)
767 .map_err(Error::WritingHeader)?;
768
769 // Rewrite the header again, now with lazy refcounts disabled.
770 header.compatible_features &= !COMPATIBLE_FEATURES_LAZY_REFCOUNTS;
771 raw_file
772 .file_mut()
773 .seek(SeekFrom::Start(0))
774 .map_err(Error::SeekingFile)?;
775 header.write_to(raw_file.file_mut())?;
776
777 Ok(())
778 }
779
780 let cluster_size = raw_file.cluster_size();
781
782 let file_size = raw_file
783 .file_mut()
784 .metadata()
785 .map_err(Error::GettingFileSize)?
786 .len();
787
788 let refcount_bits = 1u64 << header.refcount_order;
789 let refcount_bytes = div_round_up_u64(refcount_bits, 8);
790 let refcount_block_entries = cluster_size / refcount_bytes;
791 let pointers_per_cluster = cluster_size / size_of::<u64>() as u64;
792 let data_clusters = div_round_up_u64(header.size, cluster_size);
793 let l2_clusters = div_round_up_u64(data_clusters, pointers_per_cluster);
794 let l1_clusters = div_round_up_u64(l2_clusters, cluster_size);
795 let header_clusters = div_round_up_u64(size_of::<QcowHeader>() as u64, cluster_size);
796 let max_clusters = data_clusters + l2_clusters + l1_clusters + header_clusters;
797 let mut max_valid_cluster_index = max_clusters;
798 let refblock_clusters = div_round_up_u64(max_valid_cluster_index, refcount_block_entries);
799 let reftable_clusters = div_round_up_u64(refblock_clusters, pointers_per_cluster);
800 // Account for refblocks and the ref table size needed to address them.
801 let refblocks_for_refs = div_round_up_u64(
802 refblock_clusters + reftable_clusters,
803 refcount_block_entries,
804 );
805 let reftable_clusters_for_refs =
806 div_round_up_u64(refblocks_for_refs, refcount_block_entries);
807 max_valid_cluster_index += refblock_clusters + reftable_clusters;
808 max_valid_cluster_index += refblocks_for_refs + reftable_clusters_for_refs;
809
810 if max_valid_cluster_index > MAX_RAM_POINTER_TABLE_SIZE {
811 return Err(Error::InvalidRefcountTableSize(max_valid_cluster_index));
812 }
813
814 let max_valid_cluster_offset = max_valid_cluster_index * cluster_size;
815 if max_valid_cluster_offset < file_size - cluster_size {
816 return Err(Error::InvalidRefcountTableSize(max_valid_cluster_offset));
817 }
818
819 let mut refcounts = vec![0; max_valid_cluster_index as usize];
820
// Find all referenced clusters and rebuild refcounts.
822 set_header_refcount(&mut refcounts, cluster_size)?;
823 set_l1_refcounts(&mut refcounts, header, cluster_size)?;
824 set_data_refcounts(&mut refcounts, header, cluster_size, raw_file)?;
825 set_refcount_table_refcounts(&mut refcounts, header, cluster_size)?;
826
827 // Allocate clusters to store the new reference count blocks.
828 let ref_table = alloc_refblocks(
829 &mut refcounts,
830 cluster_size,
831 refblock_clusters,
832 pointers_per_cluster,
833 )?;
834
835 // Write updated reference counts and point the reftable at them.
836 write_refblocks(
837 &refcounts,
838 header,
839 &ref_table,
840 raw_file,
841 refcount_block_entries,
842 )
843 }
844
845 // Limits the range so that it doesn't exceed the virtual size of the file.
fn limit_range_file(&self, address: u64, count: usize) -> usize {
847 if address.checked_add(count as u64).is_none() || address > self.virtual_size() {
848 return 0;
849 }
850 min(count as u64, self.virtual_size() - address) as usize
851 }
852
853 // Limits the range so that it doesn't overflow the end of a cluster.
fn limit_range_cluster(&self, address: u64, count: usize) -> usize {
855 let offset: u64 = self.raw_file.cluster_offset(address);
856 let limit = self.raw_file.cluster_size() - offset;
857 min(count as u64, limit) as usize
858 }
859
860 // Gets the maximum virtual size of this image.
fn virtual_size(&self) -> u64 {
862 self.header.size
863 }
864
865 // Gets the offset of `address` in the L1 table.
fn l1_address_offset(&self, address: u64) -> u64 {
867 let l1_index = self.l1_table_index(address);
868 l1_index * size_of::<u64>() as u64
869 }
870
// Gets the index of `address` in the L1 table.
fn l1_table_index(&self, address: u64) -> u64 {
873 (address / self.raw_file.cluster_size()) / self.l2_entries
874 }
875
// Gets the index of `address` in the L2 table.
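// For example, with the default 64 KiB clusters an L2 table holds 8192 entries and so maps
// 512 MiB of guest space; guest address 0x2000_0000 therefore falls at L1 index 1, L2 index 0.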
fn l2_table_index(&self, address: u64) -> u64 {
878 (address / self.raw_file.cluster_size()) % self.l2_entries
879 }
880
881 // Gets the offset of the given guest address in the host file. If L1, L2, or data clusters have
882 // yet to be allocated, return None.
fn file_offset_read(&mut self, address: u64) -> std::io::Result<Option<u64>> {
884 if address >= self.virtual_size() as u64 {
885 return Err(std::io::Error::from_raw_os_error(EINVAL));
886 }
887
888 let l1_index = self.l1_table_index(address) as usize;
889 let l2_addr_disk = *self
890 .l1_table
891 .get(l1_index)
892 .ok_or(std::io::Error::from_raw_os_error(EINVAL))?;
893
894 if l2_addr_disk == 0 {
895 // Reading from an unallocated cluster will return zeros.
896 return Ok(None);
897 }
898
899 let l2_index = self.l2_table_index(address) as usize;
900
901 if !self.l2_cache.contains_key(&l1_index) {
902 // Not in the cache.
903 let table =
904 VecCache::from_vec(Self::read_l2_cluster(&mut self.raw_file, l2_addr_disk)?);
905
906 let l1_table = &self.l1_table;
907 let raw_file = &mut self.raw_file;
908 self.l2_cache.insert(l1_index, table, |index, evicted| {
909 raw_file.write_pointer_table(
910 l1_table[index],
911 evicted.get_values(),
912 CLUSTER_USED_FLAG,
913 )
914 })?;
915 };
916
917 let cluster_addr = self.l2_cache.get(&l1_index).unwrap()[l2_index];
918 if cluster_addr == 0 {
919 return Ok(None);
920 }
921 Ok(Some(cluster_addr + self.raw_file.cluster_offset(address)))
922 }
923
924 // Gets the offset of the given guest address in the host file. If L1, L2, or data clusters need
925 // to be allocated, they will be.
fn file_offset_write(&mut self, address: u64) -> std::io::Result<u64> {
927 if address >= self.virtual_size() as u64 {
928 return Err(std::io::Error::from_raw_os_error(EINVAL));
929 }
930
931 let l1_index = self.l1_table_index(address) as usize;
932 let l2_addr_disk = *self
933 .l1_table
934 .get(l1_index)
935 .ok_or(std::io::Error::from_raw_os_error(EINVAL))?;
936 let l2_index = self.l2_table_index(address) as usize;
937
938 let mut set_refcounts = Vec::new();
939
940 if !self.l2_cache.contains_key(&l1_index) {
941 // Not in the cache.
942 let l2_table = if l2_addr_disk == 0 {
943 // Allocate a new cluster to store the L2 table and update the L1 table to point
944 // to the new table.
945 let new_addr: u64 = self.get_new_cluster()?;
946 // The cluster refcount starts at one meaning it is used but doesn't need COW.
947 set_refcounts.push((new_addr, 1));
948 self.l1_table[l1_index] = new_addr;
949 VecCache::new(self.l2_entries as usize)
950 } else {
951 VecCache::from_vec(Self::read_l2_cluster(&mut self.raw_file, l2_addr_disk)?)
952 };
953 let l1_table = &self.l1_table;
954 let raw_file = &mut self.raw_file;
955 self.l2_cache.insert(l1_index, l2_table, |index, evicted| {
956 raw_file.write_pointer_table(
957 l1_table[index],
958 evicted.get_values(),
959 CLUSTER_USED_FLAG,
960 )
961 })?;
962 }
963
964 let cluster_addr = match self.l2_cache.get(&l1_index).unwrap()[l2_index] {
965 0 => {
966 // Need to allocate a data cluster
967 let cluster_addr = self.append_data_cluster()?;
968 self.update_cluster_addr(l1_index, l2_index, cluster_addr, &mut set_refcounts)?;
969 cluster_addr
970 }
971 a => a,
972 };
973
974 for (addr, count) in set_refcounts {
975 let mut newly_unref = self.set_cluster_refcount(addr, count)?;
976 self.unref_clusters.append(&mut newly_unref);
977 }
978
979 Ok(cluster_addr + self.raw_file.cluster_offset(address))
980 }
981
982 // Updates the l1 and l2 tables to point to the new `cluster_addr`.
fn update_cluster_addr(
984 &mut self,
985 l1_index: usize,
986 l2_index: usize,
987 cluster_addr: u64,
988 set_refcounts: &mut Vec<(u64, u16)>,
989 ) -> io::Result<()> {
990 if !self.l2_cache.get(&l1_index).unwrap().dirty() {
// Free the previously used cluster if one exists. Modified tables are always
// written to new clusters so the L1 table can be committed to disk after they
// are, and L1 never points at an invalid table.
// The index must be valid from when it was inserted.
995 let addr = self.l1_table[l1_index];
996 if addr != 0 {
997 self.unref_clusters.push(addr);
998 set_refcounts.push((addr, 0));
999 }
1000
1001 // Allocate a new cluster to store the L2 table and update the L1 table to point
1002 // to the new table. The cluster will be written when the cache is flushed, no
1003 // need to copy the data now.
1004 let new_addr: u64 = self.get_new_cluster()?;
1005 // The cluster refcount starts at one indicating it is used but doesn't need
1006 // COW.
1007 set_refcounts.push((new_addr, 1));
1008 self.l1_table[l1_index] = new_addr;
1009 }
1010 // 'unwrap' is OK because it was just added.
1011 self.l2_cache.get_mut(&l1_index).unwrap()[l2_index] = cluster_addr;
1012 Ok(())
1013 }
1014
1015 // Allocate a new cluster and return its offset within the raw file.
fn get_new_cluster(&mut self) -> std::io::Result<u64> {
// First use a preallocated cluster if one is available.
1018 if let Some(free_cluster) = self.avail_clusters.pop() {
1019 let cluster_size = self.raw_file.cluster_size() as usize;
1020 self.raw_file
1021 .file_mut()
1022 .seek(SeekFrom::Start(free_cluster))?;
1023 self.raw_file.file_mut().write_zeroes(cluster_size)?;
1024 return Ok(free_cluster);
1025 }
1026
1027 let max_valid_cluster_offset = self.refcounts.max_valid_cluster_offset();
1028 if let Some(new_cluster) = self.raw_file.add_cluster_end(max_valid_cluster_offset)? {
1029 return Ok(new_cluster);
1030 } else {
1031 error!("No free clusters in get_new_cluster()");
1032 return Err(std::io::Error::from_raw_os_error(ENOSPC));
1033 }
1034 }
1035
// Allocate and initialize a new data cluster. Returns the offset of the
// cluster in the file on success.
fn append_data_cluster(&mut self) -> std::io::Result<u64> {
1039 let new_addr: u64 = self.get_new_cluster()?;
1040 // The cluster refcount starts at one indicating it is used but doesn't need COW.
1041 let mut newly_unref = self.set_cluster_refcount(new_addr, 1)?;
1042 self.unref_clusters.append(&mut newly_unref);
1043 Ok(new_addr)
1044 }
1045
1046 // Returns true if the cluster containing `address` is already allocated.
fn cluster_allocated(&mut self, address: u64) -> std::io::Result<bool> {
1048 if address >= self.virtual_size() as u64 {
1049 return Err(std::io::Error::from_raw_os_error(EINVAL));
1050 }
1051
1052 let l1_index = self.l1_table_index(address) as usize;
1053 let l2_addr_disk = *self
1054 .l1_table
1055 .get(l1_index)
1056 .ok_or(std::io::Error::from_raw_os_error(EINVAL))?;
1057 let l2_index = self.l2_table_index(address) as usize;
1058
1059 if l2_addr_disk == 0 {
1060 // The whole L2 table for this address is not allocated yet,
1061 // so the cluster must also be unallocated.
1062 return Ok(false);
1063 }
1064
1065 if !self.l2_cache.contains_key(&l1_index) {
1066 // Not in the cache.
1067 let table =
1068 VecCache::from_vec(Self::read_l2_cluster(&mut self.raw_file, l2_addr_disk)?);
1069 let l1_table = &self.l1_table;
1070 let raw_file = &mut self.raw_file;
1071 self.l2_cache.insert(l1_index, table, |index, evicted| {
1072 raw_file.write_pointer_table(
1073 l1_table[index],
1074 evicted.get_values(),
1075 CLUSTER_USED_FLAG,
1076 )
1077 })?;
1078 }
1079
1080 let cluster_addr = self.l2_cache.get(&l1_index).unwrap()[l2_index];
1081 // If cluster_addr != 0, the cluster is allocated.
1082 Ok(cluster_addr != 0)
1083 }
1084
1085 // Find the first guest address greater than or equal to `address` whose allocation state
1086 // matches `allocated`.
fn find_allocated_cluster(
1088 &mut self,
1089 address: u64,
1090 allocated: bool,
1091 ) -> std::io::Result<Option<u64>> {
1092 let size = self.virtual_size();
1093 if address >= size {
1094 return Ok(None);
1095 }
1096
// If the cluster containing `address` already matches the desired allocation state, return it.
1098 if self.cluster_allocated(address)? == allocated {
1099 return Ok(Some(address));
1100 }
1101
1102 // Skip to the next cluster boundary.
1103 let cluster_size = self.raw_file.cluster_size();
1104 let mut cluster_addr = (address / cluster_size + 1) * cluster_size;
1105
1106 // Search for clusters with the desired allocation state.
1107 while cluster_addr < size {
1108 if self.cluster_allocated(cluster_addr)? == allocated {
1109 return Ok(Some(cluster_addr));
1110 }
1111 cluster_addr += cluster_size;
1112 }
1113
1114 Ok(None)
1115 }
1116
1117 // Deallocate the storage for the cluster starting at `address`.
1118 // Any future reads of this cluster will return all zeroes.
fn deallocate_cluster(&mut self, address: u64) -> std::io::Result<()> {
1120 if address >= self.virtual_size() as u64 {
1121 return Err(std::io::Error::from_raw_os_error(EINVAL));
1122 }
1123
1124 let l1_index = self.l1_table_index(address) as usize;
1125 let l2_addr_disk = *self
1126 .l1_table
1127 .get(l1_index)
1128 .ok_or(std::io::Error::from_raw_os_error(EINVAL))?;
1129 let l2_index = self.l2_table_index(address) as usize;
1130
1131 if l2_addr_disk == 0 {
1132 // The whole L2 table for this address is not allocated yet,
1133 // so the cluster must also be unallocated.
1134 return Ok(());
1135 }
1136
1137 if !self.l2_cache.contains_key(&l1_index) {
1138 // Not in the cache.
1139 let table =
1140 VecCache::from_vec(Self::read_l2_cluster(&mut self.raw_file, l2_addr_disk)?);
1141 let l1_table = &self.l1_table;
1142 let raw_file = &mut self.raw_file;
1143 self.l2_cache.insert(l1_index, table, |index, evicted| {
1144 raw_file.write_pointer_table(
1145 l1_table[index],
1146 evicted.get_values(),
1147 CLUSTER_USED_FLAG,
1148 )
1149 })?;
1150 }
1151
1152 let cluster_addr = self.l2_cache.get(&l1_index).unwrap()[l2_index];
1153 if cluster_addr == 0 {
1154 // This cluster is already unallocated; nothing to do.
1155 return Ok(());
1156 }
1157
1158 // Decrement the refcount.
1159 let refcount = self
1160 .refcounts
1161 .get_cluster_refcount(&mut self.raw_file, cluster_addr)
1162 .map_err(|_| std::io::Error::from_raw_os_error(EINVAL))?;
1163 if refcount == 0 {
1164 return Err(std::io::Error::from_raw_os_error(EINVAL));
1165 }
1166
1167 let new_refcount = refcount - 1;
1168 let mut newly_unref = self.set_cluster_refcount(cluster_addr, new_refcount)?;
1169 self.unref_clusters.append(&mut newly_unref);
1170
1171 // Rewrite the L2 entry to remove the cluster mapping.
1172 // unwrap is safe as we just checked/inserted this entry.
1173 self.l2_cache.get_mut(&l1_index).unwrap()[l2_index] = 0;
1174
1175 if new_refcount == 0 {
1176 let cluster_size = self.raw_file.cluster_size();
1177 // This cluster is no longer in use; deallocate the storage.
1178 // The underlying FS may not support FALLOC_FL_PUNCH_HOLE,
1179 // so don't treat an error as fatal. Future reads will return zeros anyways.
1180 let _ = self
1181 .raw_file
1182 .file_mut()
1183 .punch_hole(cluster_addr, cluster_size);
1184 self.unref_clusters.push(cluster_addr);
1185 }
1186 Ok(())
1187 }
1188
1189 // Deallocate the storage for `length` bytes starting at `address`.
1190 // Any future reads of this range will return all zeroes.
fn deallocate_bytes(&mut self, address: u64, length: usize) -> std::io::Result<()> {
1192 let write_count: usize = self.limit_range_file(address, length);
1193
1194 let mut nwritten: usize = 0;
1195 while nwritten < write_count {
1196 let curr_addr = address + nwritten as u64;
1197 let count = self.limit_range_cluster(curr_addr, write_count - nwritten);
1198
1199 if count == self.raw_file.cluster_size() as usize {
1200 // Full cluster - deallocate the storage.
1201 self.deallocate_cluster(curr_addr)?;
1202 } else {
1203 // Partial cluster - zero out the relevant bytes if it was allocated.
1204 // Any space in unallocated clusters can be left alone, since
1205 // unallocated clusters already read back as zeroes.
1206 if let Some(offset) = self.file_offset_read(curr_addr)? {
1207 // Partial cluster - zero it out.
1208 self.raw_file.file_mut().seek(SeekFrom::Start(offset))?;
1209 self.raw_file.file_mut().write_zeroes(count)?;
1210 }
1211 }
1212
1213 nwritten += count;
1214 }
1215 Ok(())
1216 }
1217
1218 // Reads an L2 cluster from the disk, returning an error if the file can't be read or if any
1219 // cluster is compressed.
fn read_l2_cluster(raw_file: &mut QcowRawFile, cluster_addr: u64) -> std::io::Result<Vec<u64>> {
1221 let file_values = raw_file.read_pointer_cluster(cluster_addr, None)?;
1222 if file_values.iter().any(|entry| entry & COMPRESSED_FLAG != 0) {
1223 return Err(std::io::Error::from_raw_os_error(ENOTSUP));
1224 }
1225 Ok(file_values
1226 .iter()
1227 .map(|entry| *entry & L2_TABLE_OFFSET_MASK)
1228 .collect())
1229 }
1230
1231 // Set the refcount for a cluster with the given address.
// Returns a list of any refblocks that can be reused. This happens when a refblock is moved;
// the old location can then be reused.
fn set_cluster_refcount(&mut self, address: u64, refcount: u16) -> std::io::Result<Vec<u64>> {
1235 let mut added_clusters = Vec::new();
1236 let mut unref_clusters = Vec::new();
1237 let mut refcount_set = false;
1238 let mut new_cluster = None;
1239
1240 while !refcount_set {
1241 match self.refcounts.set_cluster_refcount(
1242 &mut self.raw_file,
1243 address,
1244 refcount,
1245 new_cluster.take(),
1246 ) {
1247 Ok(None) => {
1248 refcount_set = true;
1249 }
1250 Ok(Some(freed_cluster)) => {
1251 unref_clusters.push(freed_cluster);
1252 refcount_set = true;
1253 }
1254 Err(refcount::Error::EvictingRefCounts(e)) => {
1255 return Err(e);
1256 }
1257 Err(refcount::Error::InvalidIndex) => {
1258 return Err(std::io::Error::from_raw_os_error(EINVAL));
1259 }
1260 Err(refcount::Error::NeedCluster(addr)) => {
1261 // Read the address and call set_cluster_refcount again.
1262 new_cluster = Some((
1263 addr,
1264 VecCache::from_vec(self.raw_file.read_refcount_block(addr)?),
1265 ));
1266 }
1267 Err(refcount::Error::NeedNewCluster) => {
1268 // Allocate the cluster and call set_cluster_refcount again.
1269 let addr = self.get_new_cluster()?;
1270 added_clusters.push(addr);
1271 new_cluster = Some((
1272 addr,
1273 VecCache::new(self.refcounts.refcounts_per_block() as usize),
1274 ));
1275 }
1276 Err(refcount::Error::ReadingRefCounts(e)) => {
1277 return Err(e);
1278 }
1279 }
1280 }
1281
1282 for addr in added_clusters {
1283 self.set_cluster_refcount(addr, 1)?;
1284 }
1285 Ok(unref_clusters)
1286 }
1287
fn sync_caches(&mut self) -> std::io::Result<()> {
1289 // Write out all dirty L2 tables.
1290 for (l1_index, l2_table) in self.l2_cache.iter_mut().filter(|(_k, v)| v.dirty()) {
// The index must be valid from when we inserted it.
1292 let addr = self.l1_table[*l1_index];
1293 if addr != 0 {
1294 self.raw_file.write_pointer_table(
1295 addr,
1296 l2_table.get_values(),
1297 CLUSTER_USED_FLAG,
1298 )?;
1299 } else {
1300 return Err(std::io::Error::from_raw_os_error(EINVAL));
1301 }
1302 l2_table.mark_clean();
1303 }
1304 // Write the modified refcount blocks.
1305 self.refcounts.flush_blocks(&mut self.raw_file)?;
// Make sure metadata (file length) and all data clusters are written.
1307 self.raw_file.file_mut().sync_all()?;
1308
1309 // Push L1 table and refcount table last as all the clusters they point to are now
1310 // guaranteed to be valid.
1311 let mut sync_required = false;
1312 if self.l1_table.dirty() {
1313 self.raw_file.write_pointer_table(
1314 self.header.l1_table_offset,
1315 &self.l1_table.get_values(),
1316 0,
1317 )?;
1318 self.l1_table.mark_clean();
1319 sync_required = true;
1320 }
1321 sync_required |= self.refcounts.flush_table(&mut self.raw_file)?;
1322 if sync_required {
1323 self.raw_file.file_mut().sync_data()?;
1324 }
1325 Ok(())
1326 }
1327
// Reads `count` bytes from the cursor position, calling `cb` repeatedly with the underlying raw
// file, the number of bytes read so far, and the number of bytes to read from the file in that
// invocation. If `None` is passed to `cb` instead of the file, `cb` should treat that range as if
// zeros had been read.
fn read_cb<F>(&mut self, count: usize, mut cb: F) -> std::io::Result<usize>
1333 where
1334 F: FnMut(Option<&mut File>, usize, usize) -> std::io::Result<()>,
1335 {
1336 let address: u64 = self.current_offset as u64;
1337 let read_count: usize = self.limit_range_file(address, count);
1338
1339 let mut nread: usize = 0;
1340 while nread < read_count {
1341 let curr_addr = address + nread as u64;
1342 let file_offset = self.file_offset_read(curr_addr)?;
1343 let count = self.limit_range_cluster(curr_addr, read_count - nread);
1344
1345 if let Some(offset) = file_offset {
1346 self.raw_file.file_mut().seek(SeekFrom::Start(offset))?;
1347 cb(Some(self.raw_file.file_mut()), nread, count)?;
1348 } else {
1349 cb(None, nread, count)?;
1350 }
1351
1352 nread += count;
1353 }
1354 self.current_offset += read_count as u64;
1355 Ok(read_count)
1356 }
1357
// Writes `count` bytes to the cursor position, calling `cb` repeatedly with the underlying raw
// file, the number of bytes written so far, and the number of bytes to write to the file in that
// invocation.
fn write_cb<F>(&mut self, count: usize, mut cb: F) -> std::io::Result<usize>
1361 where
1362 F: FnMut(&mut File, usize, usize) -> std::io::Result<()>,
1363 {
1364 let address: u64 = self.current_offset as u64;
1365 let write_count: usize = self.limit_range_file(address, count);
1366
1367 let mut nwritten: usize = 0;
1368 while nwritten < write_count {
1369 let curr_addr = address + nwritten as u64;
1370 let offset = self.file_offset_write(curr_addr)?;
1371 let count = self.limit_range_cluster(curr_addr, write_count - nwritten);
1372
self.raw_file.file_mut().seek(SeekFrom::Start(offset))?;
cb(self.raw_file.file_mut(), nwritten, count)?;
1379
1380 nwritten += count;
1381 }
1382 self.current_offset += write_count as u64;
1383 Ok(write_count)
1384 }
1385 }
1386
1387 impl Drop for QcowFile {
fn drop(&mut self) {
1389 let _ = self.sync_caches();
1390 }
1391 }
1392
1393 impl AsRawFd for QcowFile {
fn as_raw_fd(&self) -> RawFd {
1395 self.raw_file.file().as_raw_fd()
1396 }
1397 }
1398
1399 impl Read for QcowFile {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
1401 self.read_cb(buf.len(), |file, offset, count| match file {
1402 Some(f) => f.read_exact(&mut buf[offset..(offset + count)]),
1403 None => {
1404 for b in &mut buf[offset..(offset + count)] {
1405 *b = 0;
1406 }
1407 Ok(())
1408 }
1409 })
1410 }
1411 }
1412
1413 impl Seek for QcowFile {
fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
1415 let new_offset: Option<u64> = match pos {
1416 SeekFrom::Start(off) => Some(off),
1417 SeekFrom::End(off) => {
1418 if off < 0 {
1419 0i64.checked_sub(off)
1420 .and_then(|increment| self.virtual_size().checked_sub(increment as u64))
1421 } else {
1422 self.virtual_size().checked_add(off as u64)
1423 }
1424 }
1425 SeekFrom::Current(off) => {
1426 if off < 0 {
1427 0i64.checked_sub(off)
1428 .and_then(|increment| self.current_offset.checked_sub(increment as u64))
1429 } else {
1430 self.current_offset.checked_add(off as u64)
1431 }
1432 }
1433 };
1434
1435 if let Some(o) = new_offset {
1436 if o <= self.virtual_size() {
1437 self.current_offset = o;
1438 return Ok(o);
1439 }
1440 }
1441 Err(std::io::Error::from_raw_os_error(EINVAL))
1442 }
1443 }
1444
1445 impl Write for QcowFile {
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
1447 self.write_cb(buf.len(), |file, offset, count| {
1448 file.write_all(&buf[offset..(offset + count)])
1449 })
1450 }
1451
fn flush(&mut self) -> std::io::Result<()> {
1453 self.sync_caches()?;
1454 self.avail_clusters.append(&mut self.unref_clusters);
1455 Ok(())
1456 }
1457 }
1458
1459 impl FileReadWriteVolatile for QcowFile {
fn read_volatile(&mut self, slice: VolatileSlice) -> io::Result<usize> {
1461 self.read_cb(slice.size() as usize, |file, offset, count| {
1462 let sub_slice = slice.get_slice(offset as u64, count as u64).unwrap();
1463 match file {
1464 Some(f) => f.read_exact_volatile(sub_slice),
1465 None => {
1466 sub_slice.write_bytes(0);
1467 Ok(())
1468 }
1469 }
1470 })
1471 }
1472
fn write_volatile(&mut self, slice: VolatileSlice) -> io::Result<usize> {
1474 self.write_cb(slice.size() as usize, |file, offset, count| {
1475 let sub_slice = slice.get_slice(offset as u64, count as u64).unwrap();
1476 file.write_all_volatile(sub_slice)
1477 })
1478 }
1479 }
1480
1481 impl FileSync for QcowFile {
fn fsync(&mut self) -> std::io::Result<()> {
1483 self.flush()
1484 }
1485 }
1486
1487 impl FileSetLen for QcowFile {
fn set_len(&self, _len: u64) -> std::io::Result<()> {
1489 Err(std::io::Error::new(
1490 std::io::ErrorKind::Other,
1491 "set_len() not supported for QcowFile",
1492 ))
1493 }
1494 }
1495
1496 impl PunchHole for QcowFile {
fn punch_hole(&mut self, offset: u64, length: u64) -> std::io::Result<()> {
1498 let mut remaining = length;
1499 let mut offset = offset;
1500 while remaining > 0 {
1501 let chunk_length = min(remaining, std::usize::MAX as u64) as usize;
1502 self.deallocate_bytes(offset, chunk_length)?;
1503 remaining -= chunk_length as u64;
1504 offset += chunk_length as u64;
1505 }
1506 Ok(())
1507 }
1508 }
1509
1510 impl SeekHole for QcowFile {
fn seek_hole(&mut self, offset: u64) -> io::Result<Option<u64>> {
1512 match self.find_allocated_cluster(offset, false) {
1513 Err(e) => Err(e),
1514 Ok(None) => {
1515 if offset < self.virtual_size() {
1516 Ok(Some(self.seek(SeekFrom::End(0))?))
1517 } else {
1518 Ok(None)
1519 }
1520 }
1521 Ok(Some(o)) => {
1522 self.seek(SeekFrom::Start(o))?;
1523 Ok(Some(o))
1524 }
1525 }
1526 }
1527
fn seek_data(&mut self, offset: u64) -> io::Result<Option<u64>> {
1529 match self.find_allocated_cluster(offset, true) {
1530 Err(e) => Err(e),
1531 Ok(None) => Ok(None),
1532 Ok(Some(o)) => {
1533 self.seek(SeekFrom::Start(o))?;
1534 Ok(Some(o))
1535 }
1536 }
1537 }
1538 }
1539
1540 // Returns an Error if the given offset doesn't align to a cluster boundary.
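// For example, with cluster_bits = 16 (64 KiB clusters) any multiple of 0x10000 passes the check.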
fn offset_is_cluster_boundary(offset: u64, cluster_bits: u32) -> Result<()> {
1542 if offset & ((0x01 << cluster_bits) - 1) != 0 {
1543 return Err(Error::InvalidOffset(offset));
1544 }
1545 Ok(())
1546 }
1547
1548 // Ceiling of the division of `dividend`/`divisor`.
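// For example, div_round_up_u64(5, 4) == 2 and div_round_up_u64(8, 4) == 2.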
fn div_round_up_u64(dividend: u64, divisor: u64) -> u64 {
1550 (dividend + divisor - 1) / divisor
1551 }
1552
1553 // Ceiling of the division of `dividend`/`divisor`.
fn div_round_up_u32(dividend: u32, divisor: u32) -> u32 {
1555 (dividend + divisor - 1) / divisor
1556 }
1557
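// Copies `size` bytes starting at `offset` in `reader` to the same `offset` in `writer`.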
fn convert_copy<R, W>(reader: &mut R, writer: &mut W, offset: u64, size: u64) -> Result<()>
where
    R: Read + Seek,
    W: Write + Seek,
{
    const CHUNK_SIZE: usize = 65536;
    let mut buf = [0; CHUNK_SIZE];
    let mut read_count = 0;
    reader
        .seek(SeekFrom::Start(offset))
        .map_err(Error::SeekingFile)?;
    writer
        .seek(SeekFrom::Start(offset))
        .map_err(Error::SeekingFile)?;
    loop {
        let this_count = min(CHUNK_SIZE as u64, size - read_count) as usize;
        let nread = reader
            .read(&mut buf[..this_count])
            .map_err(Error::ReadingData)?;
        // Use write_all so that a short write does not silently drop data.
        writer
            .write_all(&buf[..nread])
            .map_err(Error::WritingData)?;
        read_count += nread as u64;
        if nread == 0 || read_count == size {
            break;
        }
    }

    Ok(())
}

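// Copies every allocated data range reported by `reader` into `writer`, skipping over
// holes so that the destination can stay sparse.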
fn convert_reader_writer<R, W>(reader: &mut R, writer: &mut W, size: u64) -> Result<()>
where
    R: Read + Seek + SeekHole,
    W: Write + Seek,
{
    let mut offset = 0;
    while offset < size {
        // Find the next range of data.
        let next_data = match reader.seek_data(offset).map_err(Error::SeekingFile)? {
            Some(o) => o,
            None => {
                // No more data in the file.
                break;
            }
        };
        let next_hole = match reader.seek_hole(next_data).map_err(Error::SeekingFile)? {
            Some(o) => o,
            None => {
                // This should not happen - there should always be at least one hole
                // after any data.
                return Err(Error::SeekingFile(io::Error::from_raw_os_error(EINVAL)));
            }
        };
        let count = next_hole - next_data;
        convert_copy(reader, writer, next_data, count)?;
        offset = next_hole;
    }

    Ok(())
}

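// Copies the contents of `reader` into `dst_file`, writing the destination in `dst_type` format.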
fn convert_reader<R>(reader: &mut R, dst_file: File, dst_type: ImageType) -> Result<()>
where
    R: Read + Seek + SeekHole,
{
    let src_size = reader.seek(SeekFrom::End(0)).map_err(Error::SeekingFile)?;
    reader
        .seek(SeekFrom::Start(0))
        .map_err(Error::SeekingFile)?;

    // Ensure the destination file is empty before writing to it.
    dst_file.set_len(0).map_err(Error::SettingFileSize)?;

    match dst_type {
        ImageType::Qcow2 => {
            let mut dst_writer = QcowFile::new(dst_file, src_size)?;
            convert_reader_writer(reader, &mut dst_writer, src_size)
        }
        ImageType::Raw => {
            let mut dst_writer = dst_file;
            // Set the length of the destination file to convert it into a sparse file
            // of the desired size.
            dst_writer
                .set_len(src_size)
                .map_err(Error::SettingFileSize)?;
            convert_reader_writer(reader, &mut dst_writer, src_size)
        }
    }
}

/// Copy the contents of a disk image in `src_file` into `dst_file`.
/// The type of `src_file` is automatically detected, and the output file type is
/// determined by `dst_type`.
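///
/// A minimal usage sketch; the file paths are illustrative, and the example assumes this
/// crate is available to the doctest under the name `qcow`:
///
/// ```no_run
/// use std::fs::File;
///
/// use qcow::{convert, ImageType};
///
/// // Convert a source image (qcow2 or raw, detected automatically) into a raw image.
/// let src = File::open("src_disk.img").expect("failed to open source image");
/// let dst = File::create("dst_disk.img").expect("failed to create destination image");
/// convert(src, dst, ImageType::Raw).expect("failed to convert image");
/// ```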
pub fn convert(src_file: File, dst_file: File, dst_type: ImageType) -> Result<()> {
    let src_type = detect_image_type(&src_file)?;
    match src_type {
        ImageType::Qcow2 => {
            let mut src_reader = QcowFile::from(src_file)?;
            convert_reader(&mut src_reader, dst_file, dst_type)
        }
        ImageType::Raw => {
            // src_file is a raw file.
            let mut src_reader = src_file;
            convert_reader(&mut src_reader, dst_file, dst_type)
        }
    }
}

/// Detect the type of an image file by checking for a valid qcow2 header.
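///
/// A minimal sketch; the path is illustrative, and the example assumes this crate is
/// available to the doctest under the name `qcow`:
///
/// ```no_run
/// use std::fs::File;
///
/// use qcow::{detect_image_type, ImageType};
///
/// let file = File::open("disk.img").expect("failed to open image");
/// match detect_image_type(&file).expect("failed to detect image type") {
///     ImageType::Qcow2 => println!("disk.img is a qcow2 image"),
///     ImageType::Raw => println!("disk.img is a raw image"),
/// }
/// ```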
pub fn detect_image_type(file: &File) -> Result<ImageType> {
    let mut f = file;
    let orig_seek = f.seek(SeekFrom::Current(0)).map_err(Error::SeekingFile)?;
    f.seek(SeekFrom::Start(0)).map_err(Error::SeekingFile)?;
    let magic = f.read_u32::<BigEndian>().map_err(Error::ReadingHeader)?;
    let image_type = if magic == QCOW_MAGIC {
        ImageType::Qcow2
    } else {
        ImageType::Raw
    };
    f.seek(SeekFrom::Start(orig_seek))
        .map_err(Error::SeekingFile)?;
    Ok(image_type)
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::fs::File;
    use std::io::{Read, Seek, SeekFrom, Write};
    use sys_util::SharedMemory;

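    // Returns a valid 104-byte qcow2 header describing a 0x20_0000_0000-byte (128 GiB)
    // image with cluster_bits = 16 (64 KiB clusters).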
    fn valid_header() -> Vec<u8> {
        vec![
            0x51u8, 0x46, 0x49, 0xfb, // magic
            0x00, 0x00, 0x00, 0x03, // version
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // backing file offset
            0x00, 0x00, 0x00, 0x00, // backing file size
            0x00, 0x00, 0x00, 0x10, // cluster_bits
            0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, // size
            0x00, 0x00, 0x00, 0x00, // crypt method
            0x00, 0x00, 0x01, 0x00, // L1 size
            0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, // L1 table offset
            0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, // refcount table offset
            0x00, 0x00, 0x00, 0x03, // refcount table clusters
            0x00, 0x00, 0x00, 0x00, // nb snapshots
            0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, // snapshots offset
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // incompatible_features
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // compatible_features
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // autoclear_features
            0x00, 0x00, 0x00, 0x04, // refcount_order
            0x00, 0x00, 0x00, 0x68, // header_length
        ]
    }

    // Test case found by clusterfuzz to allocate excessive memory.
    fn test_huge_header() -> Vec<u8> {
        vec![
            0x51, 0x46, 0x49, 0xfb, // magic
            0x00, 0x00, 0x00, 0x03, // version
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // backing file offset
            0x00, 0x00, 0x00, 0x00, // backing file size
            0x00, 0x00, 0x00, 0x09, // cluster_bits
            0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, // size
            0x00, 0x00, 0x00, 0x00, // crypt method
            0x00, 0x00, 0x01, 0x00, // L1 size
            0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, // L1 table offset
            0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, // refcount table offset
            0x00, 0x00, 0x00, 0x03, // refcount table clusters
            0x00, 0x00, 0x00, 0x00, // nb snapshots
            0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, // snapshots offset
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // incompatible_features
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // compatible_features
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // autoclear_features
            0x00, 0x00, 0x00, 0x04, // refcount_order
            0x00, 0x00, 0x00, 0x68, // header_length
        ]
    }

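    // Wraps `header` in a File backed by shared memory, pads the file out to a fixed
    // length, and passes it to `testfn`.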
    fn with_basic_file<F>(header: &[u8], mut testfn: F)
    where
        F: FnMut(File),
    {
        let shm = SharedMemory::new(None).unwrap();
        let mut disk_file: File = shm.into();
        disk_file.write_all(&header).unwrap();
        disk_file.set_len(0x1_0000_0000).unwrap();
        disk_file.seek(SeekFrom::Start(0)).unwrap();

        testfn(disk_file); // File closed when the function exits.
    }

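    // Creates a QcowFile of `file_size` bytes backed by shared memory and passes it to
    // `testfn`.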
    fn with_default_file<F>(file_size: u64, mut testfn: F)
    where
        F: FnMut(QcowFile),
    {
        let shm = SharedMemory::new(None).unwrap();
        let qcow_file = QcowFile::new(shm.into(), file_size).unwrap();

        testfn(qcow_file); // File closed when the function exits.
    }

    #[test]
    fn default_header() {
        let header = QcowHeader::create_for_size(0x10_0000);
        let shm = SharedMemory::new(None).unwrap();
        let mut disk_file: File = shm.into();
        header
            .write_to(&mut disk_file)
            .expect("Failed to write header to shm.");
        disk_file.seek(SeekFrom::Start(0)).unwrap();
        QcowFile::from(disk_file).expect("Failed to create Qcow from default Header");
    }

    #[test]
    fn header_read() {
        with_basic_file(&valid_header(), |mut disk_file: File| {
            QcowHeader::new(&mut disk_file).expect("Failed to create Header.");
        });
    }

    #[test]
    fn invalid_magic() {
        let invalid_header = vec![0x51u8, 0x46, 0x4a, 0xfb];
        with_basic_file(&invalid_header, |mut disk_file: File| {
            QcowHeader::new(&mut disk_file).expect_err("Invalid header worked.");
        });
    }

    #[test]
    fn invalid_refcount_order() {
        let mut header = valid_header();
        header[99] = 2;
        with_basic_file(&header, |disk_file: File| {
            QcowFile::from(disk_file).expect_err("Invalid refcount order worked.");
        });
    }

    #[test]
    fn invalid_cluster_bits() {
        let mut header = test_huge_header();
        // Set an invalid cluster_bits value (far too small to be a valid cluster size).
        header[23] = 3;
        with_basic_file(&header, |disk_file: File| {
            QcowFile::from(disk_file).expect_err("Invalid cluster_bits worked.");
        });
    }

    #[test]
    fn test_header_huge_file() {
        let header = test_huge_header();
        with_basic_file(&header, |disk_file: File| {
            QcowFile::from(disk_file).expect_err("Huge header worked.");
        });
    }

    #[test]
    fn test_header_1_tb_file_min_cluster() {
        let mut header = test_huge_header();
        header[24] = 0;
        header[26] = 1;
        header[31] = 0;
        // 1 TB with the min cluster size makes the arrays too big; it should fail.
        with_basic_file(&header, |disk_file: File| {
            QcowFile::from(disk_file).expect_err("1 TB file with min cluster size worked.");
        });
    }

    #[test]
    fn test_header_1_tb_file() {
        let mut header = test_huge_header();
        // Reset to 1 TB size.
        header[24] = 0;
        header[26] = 1;
        header[31] = 0;
        // Set cluster_bits to 16 (64 KiB clusters).
        header[23] = 16;
        with_basic_file(&header, |disk_file: File| {
            let mut qcow = QcowFile::from(disk_file).expect("Failed to create file.");
            qcow.seek(SeekFrom::Start(0x100_0000_0000 - 8))
                .expect("Failed to seek.");
            let value = 0x0000_0040_3f00_ffffu64;
            qcow.write_all(&value.to_le_bytes())
                .expect("failed to write data");
        });
    }

    #[test]
    fn write_read_start() {
        with_basic_file(&valid_header(), |disk_file: File| {
            let mut q = QcowFile::from(disk_file).unwrap();
            q.write(b"test first bytes")
                .expect("Failed to write test string.");
            let mut buf = [0u8; 4];
            q.seek(SeekFrom::Start(0)).expect("Failed to seek.");
            q.read(&mut buf).expect("Failed to read.");
            assert_eq!(&buf, b"test");
        });
    }

    #[test]
    fn offset_write_read() {
        with_basic_file(&valid_header(), |disk_file: File| {
            let mut q = QcowFile::from(disk_file).unwrap();
            let b = [0x55u8; 0x1000];
            q.seek(SeekFrom::Start(0xfff2000)).expect("Failed to seek.");
            q.write(&b).expect("Failed to write test string.");
            let mut buf = [0u8; 4];
            q.seek(SeekFrom::Start(0xfff2000)).expect("Failed to seek.");
            q.read(&mut buf).expect("Failed to read.");
            assert_eq!(buf[0], 0x55);
        });
    }

    #[test]
    fn write_zeroes_read() {
        with_basic_file(&valid_header(), |disk_file: File| {
            let mut q = QcowFile::from(disk_file).unwrap();
            // Write some test data.
            let b = [0x55u8; 0x1000];
            q.seek(SeekFrom::Start(0xfff2000)).expect("Failed to seek.");
            q.write(&b).expect("Failed to write test string.");
            // Overwrite the test data with zeroes.
            q.seek(SeekFrom::Start(0xfff2000)).expect("Failed to seek.");
            let nwritten = q.write_zeroes(0x200).expect("Failed to write zeroes.");
            assert_eq!(nwritten, 0x200);
            // Verify that the correct part of the data was zeroed out.
            let mut buf = [0u8; 0x1000];
            q.seek(SeekFrom::Start(0xfff2000)).expect("Failed to seek.");
            q.read(&mut buf).expect("Failed to read.");
            assert_eq!(buf[0], 0);
            assert_eq!(buf[0x1FF], 0);
            assert_eq!(buf[0x200], 0x55);
            assert_eq!(buf[0xFFF], 0x55);
        });
    }

    #[test]
    fn write_zeroes_full_cluster() {
        // Choose a size that is larger than a cluster.
        // valid_header uses cluster_bits = 16, which corresponds to a cluster size of 65536.
        const CHUNK_SIZE: usize = 65536 * 2 + 512;
        with_basic_file(&valid_header(), |disk_file: File| {
            let mut q = QcowFile::from(disk_file).unwrap();
            // Write some test data.
            let b = [0x55u8; CHUNK_SIZE];
            q.seek(SeekFrom::Start(0)).expect("Failed to seek.");
            q.write(&b).expect("Failed to write test string.");
            // Overwrite the full cluster with zeroes.
            q.seek(SeekFrom::Start(0)).expect("Failed to seek.");
            let nwritten = q.write_zeroes(CHUNK_SIZE).expect("Failed to write zeroes.");
            assert_eq!(nwritten, CHUNK_SIZE);
            // Verify that the data was zeroed out.
            let mut buf = [0u8; CHUNK_SIZE];
            q.seek(SeekFrom::Start(0)).expect("Failed to seek.");
            q.read(&mut buf).expect("Failed to read.");
            assert_eq!(buf[0], 0);
            assert_eq!(buf[CHUNK_SIZE - 1], 0);
        });
    }

    #[test]
    fn test_header() {
        with_basic_file(&valid_header(), |disk_file: File| {
            let q = QcowFile::from(disk_file).unwrap();
            assert_eq!(q.virtual_size(), 0x20_0000_0000);
        });
    }

    #[test]
    fn read_small_buffer() {
        with_basic_file(&valid_header(), |disk_file: File| {
            let mut q = QcowFile::from(disk_file).unwrap();
            let mut b = [5u8; 16];
            q.seek(SeekFrom::Start(1000)).expect("Failed to seek.");
            q.read(&mut b).expect("Failed to read.");
            assert_eq!(0, b[0]);
            assert_eq!(0, b[15]);
        });
    }

    #[test]
    fn replay_ext4() {
        with_basic_file(&valid_header(), |disk_file: File| {
            let mut q = QcowFile::from(disk_file).unwrap();
            const BUF_SIZE: usize = 0x1000;
            let mut b = [0u8; BUF_SIZE];

            struct Transfer {
                pub write: bool,
                pub addr: u64,
            }

            // Write transactions from mkfs.ext4.
            let xfers: Vec<Transfer> = vec![
                Transfer {
                    write: false,
                    addr: 0xfff0000,
                },
                Transfer {
                    write: false,
                    addr: 0xfffe000,
                },
                Transfer {
                    write: false,
                    addr: 0x0,
                },
                Transfer {
                    write: false,
                    addr: 0x1000,
                },
                Transfer {
                    write: false,
                    addr: 0xffff000,
                },
                Transfer {
                    write: false,
                    addr: 0xffdf000,
                },
                Transfer {
                    write: false,
                    addr: 0xfff8000,
                },
                Transfer {
                    write: false,
                    addr: 0xffe0000,
                },
                Transfer {
                    write: false,
                    addr: 0xffce000,
                },
                Transfer {
                    write: false,
                    addr: 0xffb6000,
                },
                Transfer {
                    write: false,
                    addr: 0xffab000,
                },
                Transfer {
                    write: false,
                    addr: 0xffa4000,
                },
                Transfer {
                    write: false,
                    addr: 0xff8e000,
                },
                Transfer {
                    write: false,
                    addr: 0xff86000,
                },
                Transfer {
                    write: false,
                    addr: 0xff84000,
                },
                Transfer {
                    write: false,
                    addr: 0xff89000,
                },
                Transfer {
                    write: false,
                    addr: 0xfe7e000,
                },
                Transfer {
                    write: false,
                    addr: 0x100000,
                },
                Transfer {
                    write: false,
                    addr: 0x3000,
                },
                Transfer {
                    write: false,
                    addr: 0x7000,
                },
                Transfer {
                    write: false,
                    addr: 0xf000,
                },
                Transfer {
                    write: false,
                    addr: 0x2000,
                },
                Transfer {
                    write: false,
                    addr: 0x4000,
                },
                Transfer {
                    write: false,
                    addr: 0x5000,
                },
                Transfer {
                    write: false,
                    addr: 0x6000,
                },
                Transfer {
                    write: false,
                    addr: 0x8000,
                },
                Transfer {
                    write: false,
                    addr: 0x9000,
                },
                Transfer {
                    write: false,
                    addr: 0xa000,
                },
                Transfer {
                    write: false,
                    addr: 0xb000,
                },
                Transfer {
                    write: false,
                    addr: 0xc000,
                },
                Transfer {
                    write: false,
                    addr: 0xd000,
                },
                Transfer {
                    write: false,
                    addr: 0xe000,
                },
                Transfer {
                    write: false,
                    addr: 0x10000,
                },
                Transfer {
                    write: false,
                    addr: 0x11000,
                },
                Transfer {
                    write: false,
                    addr: 0x12000,
                },
                Transfer {
                    write: false,
                    addr: 0x13000,
                },
                Transfer {
                    write: false,
                    addr: 0x14000,
                },
                Transfer {
                    write: false,
                    addr: 0x15000,
                },
                Transfer {
                    write: false,
                    addr: 0x16000,
                },
                Transfer {
                    write: false,
                    addr: 0x17000,
                },
                Transfer {
                    write: false,
                    addr: 0x18000,
                },
                Transfer {
                    write: false,
                    addr: 0x19000,
                },
                Transfer {
                    write: false,
                    addr: 0x1a000,
                },
                Transfer {
                    write: false,
                    addr: 0x1b000,
                },
                Transfer {
                    write: false,
                    addr: 0x1c000,
                },
                Transfer {
                    write: false,
                    addr: 0x1d000,
                },
                Transfer {
                    write: false,
                    addr: 0x1e000,
                },
                Transfer {
                    write: false,
                    addr: 0x1f000,
                },
                Transfer {
                    write: false,
                    addr: 0x21000,
                },
                Transfer {
                    write: false,
                    addr: 0x22000,
                },
                Transfer {
                    write: false,
                    addr: 0x24000,
                },
                Transfer {
                    write: false,
                    addr: 0x40000,
                },
                Transfer {
                    write: false,
                    addr: 0x0,
                },
                Transfer {
                    write: false,
                    addr: 0x3000,
                },
                Transfer {
                    write: false,
                    addr: 0x7000,
                },
                Transfer {
                    write: false,
                    addr: 0x0,
                },
                Transfer {
                    write: false,
                    addr: 0x1000,
                },
                Transfer {
                    write: false,
                    addr: 0x2000,
                },
                Transfer {
                    write: false,
                    addr: 0x3000,
                },
                Transfer {
                    write: false,
                    addr: 0x0,
                },
                Transfer {
                    write: false,
                    addr: 0x449000,
                },
                Transfer {
                    write: false,
                    addr: 0x48000,
                },
                Transfer {
                    write: false,
                    addr: 0x48000,
                },
                Transfer {
                    write: false,
                    addr: 0x448000,
                },
                Transfer {
                    write: false,
                    addr: 0x44a000,
                },
                Transfer {
                    write: false,
                    addr: 0x48000,
                },
                Transfer {
                    write: false,
                    addr: 0x48000,
                },
                Transfer {
                    write: true,
                    addr: 0x0,
                },
                Transfer {
                    write: true,
                    addr: 0x448000,
                },
                Transfer {
                    write: true,
                    addr: 0x449000,
                },
                Transfer {
                    write: true,
                    addr: 0x44a000,
                },
                Transfer {
                    write: true,
                    addr: 0xfff0000,
                },
                Transfer {
                    write: true,
                    addr: 0xfff1000,
                },
                Transfer {
                    write: true,
                    addr: 0xfff2000,
                },
                Transfer {
                    write: true,
                    addr: 0xfff3000,
                },
                Transfer {
                    write: true,
                    addr: 0xfff4000,
                },
                Transfer {
                    write: true,
                    addr: 0xfff5000,
                },
                Transfer {
                    write: true,
                    addr: 0xfff6000,
                },
                Transfer {
                    write: true,
                    addr: 0xfff7000,
                },
                Transfer {
                    write: true,
                    addr: 0xfff8000,
                },
                Transfer {
                    write: true,
                    addr: 0xfff9000,
                },
                Transfer {
                    write: true,
                    addr: 0xfffa000,
                },
                Transfer {
                    write: true,
                    addr: 0xfffb000,
                },
                Transfer {
                    write: true,
                    addr: 0xfffc000,
                },
                Transfer {
                    write: true,
                    addr: 0xfffd000,
                },
                Transfer {
                    write: true,
                    addr: 0xfffe000,
                },
                Transfer {
                    write: true,
                    addr: 0xffff000,
                },
            ];

            for xfer in &xfers {
                q.seek(SeekFrom::Start(xfer.addr)).expect("Failed to seek.");
                if xfer.write {
                    q.write(&b).expect("Failed to write.");
                } else {
                    let read_count: usize = q.read(&mut b).expect("Failed to read.");
                    assert_eq!(read_count, BUF_SIZE);
                }
            }
        });
    }

    #[test]
    fn combo_write_read() {
        with_default_file(1024 * 1024 * 1024 * 256, |mut qcow_file| {
            const NUM_BLOCKS: usize = 555;
            const BLOCK_SIZE: usize = 0x1_0000;
            const OFFSET: usize = 0x1_0000_0020;
            let data = [0x55u8; BLOCK_SIZE];
            let mut readback = [0u8; BLOCK_SIZE];
            for i in 0..NUM_BLOCKS {
                let seek_offset = OFFSET + i * BLOCK_SIZE;
                qcow_file
                    .seek(SeekFrom::Start(seek_offset as u64))
                    .expect("Failed to seek.");
                let nwritten = qcow_file.write(&data).expect("Failed to write test data.");
                assert_eq!(nwritten, BLOCK_SIZE);
                // Read back the data to check it was written correctly.
                qcow_file
                    .seek(SeekFrom::Start(seek_offset as u64))
                    .expect("Failed to seek.");
                let nread = qcow_file.read(&mut readback).expect("Failed to read.");
                assert_eq!(nread, BLOCK_SIZE);
                for (orig, read) in data.iter().zip(readback.iter()) {
                    assert_eq!(orig, read);
                }
            }
            // Check that address 0 is still zeros.
            qcow_file.seek(SeekFrom::Start(0)).expect("Failed to seek.");
            let nread = qcow_file.read(&mut readback).expect("Failed to read.");
            assert_eq!(nread, BLOCK_SIZE);
            for read in readback.iter() {
                assert_eq!(*read, 0);
            }
            // Check the data again after the writes have happened.
            for i in 0..NUM_BLOCKS {
                let seek_offset = OFFSET + i * BLOCK_SIZE;
                qcow_file
                    .seek(SeekFrom::Start(seek_offset as u64))
                    .expect("Failed to seek.");
                let nread = qcow_file.read(&mut readback).expect("Failed to read.");
                assert_eq!(nread, BLOCK_SIZE);
                for (orig, read) in data.iter().zip(readback.iter()) {
                    assert_eq!(orig, read);
                }
            }

            assert_eq!(qcow_file.first_zero_refcount().unwrap(), None);
        });
    }

    fn seek_cur(file: &mut QcowFile) -> u64 {
        file.seek(SeekFrom::Current(0)).unwrap()
    }

    #[test]
    fn seek_data() {
        with_default_file(0x30000, |mut file| {
            // seek_data should return None while the file contains no allocated data.
            assert_eq!(file.seek_data(0x10000).unwrap(), None);
            assert_eq!(seek_cur(&mut file), 0);
            assert_eq!(file.seek_data(0x10001).unwrap(), None);
            assert_eq!(seek_cur(&mut file), 0);

            // Write some data to [0x10000, 0x20000)
            let b = [0x55u8; 0x10000];
            file.seek(SeekFrom::Start(0x10000)).unwrap();
            file.write_all(&b).unwrap();
            assert_eq!(file.seek_data(0).unwrap(), Some(0x10000));
            assert_eq!(seek_cur(&mut file), 0x10000);

            // seek_data within data should return the same offset
            assert_eq!(file.seek_data(0x10000).unwrap(), Some(0x10000));
            assert_eq!(seek_cur(&mut file), 0x10000);
            assert_eq!(file.seek_data(0x10001).unwrap(), Some(0x10001));
            assert_eq!(seek_cur(&mut file), 0x10001);
            assert_eq!(file.seek_data(0x1FFFF).unwrap(), Some(0x1FFFF));
            assert_eq!(seek_cur(&mut file), 0x1FFFF);

            // seek_data at or past the end of the data should return None
            assert_eq!(file.seek_data(0).unwrap(), Some(0x10000));
            assert_eq!(seek_cur(&mut file), 0x10000);
            assert_eq!(file.seek_data(0x1FFFF).unwrap(), Some(0x1FFFF));
            assert_eq!(seek_cur(&mut file), 0x1FFFF);
            assert_eq!(file.seek_data(0x20000).unwrap(), None);
            assert_eq!(seek_cur(&mut file), 0x1FFFF);
        });
    }

    #[test]
    fn seek_hole() {
        with_default_file(0x30000, |mut file| {
            // File consisting entirely of a hole
            assert_eq!(file.seek_hole(0).unwrap(), Some(0));
            assert_eq!(seek_cur(&mut file), 0);
            assert_eq!(file.seek_hole(0xFFFF).unwrap(), Some(0xFFFF));
            assert_eq!(seek_cur(&mut file), 0xFFFF);

            // seek_hole at or after the end of the file should return None
            file.seek(SeekFrom::Start(0)).unwrap();
            assert_eq!(file.seek_hole(0x30000).unwrap(), None);
            assert_eq!(seek_cur(&mut file), 0);
            assert_eq!(file.seek_hole(0x30001).unwrap(), None);
            assert_eq!(seek_cur(&mut file), 0);

            // Write some data to [0x10000, 0x20000)
            let b = [0x55u8; 0x10000];
            file.seek(SeekFrom::Start(0x10000)).unwrap();
            file.write_all(&b).unwrap();

            // seek_hole within a hole should return the same offset
            assert_eq!(file.seek_hole(0).unwrap(), Some(0));
            assert_eq!(seek_cur(&mut file), 0);
            assert_eq!(file.seek_hole(0xFFFF).unwrap(), Some(0xFFFF));
            assert_eq!(seek_cur(&mut file), 0xFFFF);

            // seek_hole within data should return the next hole
            file.seek(SeekFrom::Start(0)).unwrap();
            assert_eq!(file.seek_hole(0x10000).unwrap(), Some(0x20000));
            assert_eq!(seek_cur(&mut file), 0x20000);
            file.seek(SeekFrom::Start(0)).unwrap();
            assert_eq!(file.seek_hole(0x10001).unwrap(), Some(0x20000));
            assert_eq!(seek_cur(&mut file), 0x20000);
            file.seek(SeekFrom::Start(0)).unwrap();
            assert_eq!(file.seek_hole(0x1FFFF).unwrap(), Some(0x20000));
            assert_eq!(seek_cur(&mut file), 0x20000);
            file.seek(SeekFrom::Start(0)).unwrap();
            assert_eq!(file.seek_hole(0xFFFF).unwrap(), Some(0xFFFF));
            assert_eq!(seek_cur(&mut file), 0xFFFF);
            file.seek(SeekFrom::Start(0)).unwrap();
            assert_eq!(file.seek_hole(0x10000).unwrap(), Some(0x20000));
            assert_eq!(seek_cur(&mut file), 0x20000);
            file.seek(SeekFrom::Start(0)).unwrap();
            assert_eq!(file.seek_hole(0x1FFFF).unwrap(), Some(0x20000));
            assert_eq!(seek_cur(&mut file), 0x20000);
            file.seek(SeekFrom::Start(0)).unwrap();
            assert_eq!(file.seek_hole(0x20000).unwrap(), Some(0x20000));
            assert_eq!(seek_cur(&mut file), 0x20000);
            file.seek(SeekFrom::Start(0)).unwrap();
            assert_eq!(file.seek_hole(0x20001).unwrap(), Some(0x20001));
            assert_eq!(seek_cur(&mut file), 0x20001);

            // seek_hole at EOF should return None
            file.seek(SeekFrom::Start(0)).unwrap();
            assert_eq!(file.seek_hole(0x30000).unwrap(), None);
            assert_eq!(seek_cur(&mut file), 0);

            // Write some data to [0x20000, 0x30000)
            file.seek(SeekFrom::Start(0x20000)).unwrap();
            file.write_all(&b).unwrap();

            // seek_hole within [0x20000, 0x30000) should now find the hole at EOF
            assert_eq!(file.seek_hole(0x20000).unwrap(), Some(0x30000));
            assert_eq!(seek_cur(&mut file), 0x30000);
            file.seek(SeekFrom::Start(0)).unwrap();
            assert_eq!(file.seek_hole(0x20001).unwrap(), Some(0x30000));
            assert_eq!(seek_cur(&mut file), 0x30000);
            file.seek(SeekFrom::Start(0)).unwrap();
            assert_eq!(file.seek_hole(0x30000).unwrap(), None);
            assert_eq!(seek_cur(&mut file), 0);
        });
    }

    #[test]
    fn rebuild_refcounts() {
        with_basic_file(&valid_header(), |mut disk_file: File| {
            let header = QcowHeader::new(&mut disk_file).expect("Failed to create Header.");
            let cluster_size = 65536;
            let mut raw_file =
                QcowRawFile::from(disk_file, cluster_size).expect("Failed to create QcowRawFile.");
            QcowFile::rebuild_refcounts(&mut raw_file, header)
                .expect("Failed to rebuild refcounts.");
        });
    }
}