/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

use libc::EIO;
use std::io;

use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError};
use crate::common::{divide_roundup, CHUNK_SIZE};
use crate::crypto::{CryptoError, Sha256Hasher};
use crate::file::{ChunkBuffer, ReadByChunk};

const ZEROS: [u8; CHUNK_SIZE as usize] = [0u8; CHUNK_SIZE as usize];

type HashBuffer = [u8; Sha256Hasher::HASH_SIZE];

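/// Hashes `chunk` 0-padded to `pad_to` bytes; fs-verity hashes an incomplete (last) block as
/// if it were zero-padded to the full block size.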
fn hash_with_padding(chunk: &[u8], pad_to: usize) -> Result<HashBuffer, CryptoError> {
    let padding_size = pad_to - chunk.len();
    Sha256Hasher::new()?.update(chunk)?.update(&ZEROS[..padding_size])?.finalize()
}

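/// Verifies a chunk of data against the Merkle tree and, on success, returns the computed
/// root hash. For a file no larger than `CHUNK_SIZE`, the padded chunk hash is itself the
/// root hash; otherwise the chunk hash is folded up the tree by `fsverity_walk`, checking
/// the hash recorded at each level along the way.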
fn verity_check<T: ReadByChunk>(
    chunk: &[u8],
    chunk_index: u64,
    file_size: u64,
    merkle_tree: &T,
) -> Result<HashBuffer, FsverityError> {
    // The caller should not be able to produce a chunk in the first place if `file_size` is 0.
    // The current implementation expects to crash when a `ReadByChunk` implementation reads
    // beyond the file size, including an empty file.
    assert_ne!(file_size, 0);

    let chunk_hash = hash_with_padding(chunk, CHUNK_SIZE as usize)?;

    // When the file is smaller than or equal to CHUNK_SIZE, the root of the Merkle tree is
    // defined as the hash of the file content, plus padding.
    if file_size <= CHUNK_SIZE {
        return Ok(chunk_hash);
    }

    fsverity_walk(chunk_index, file_size, merkle_tree)?.try_fold(
        chunk_hash,
        |actual_hash, result| {
            let (merkle_chunk, hash_offset_in_chunk) = result?;
            let expected_hash =
                &merkle_chunk[hash_offset_in_chunk..hash_offset_in_chunk + Sha256Hasher::HASH_SIZE];
            if actual_hash != expected_hash {
                return Err(FsverityError::CannotVerify);
            }
            Ok(hash_with_padding(&merkle_chunk, CHUNK_SIZE as usize)?)
        },
    )
}

/// Given a chunk index and the size of the file, returns an iterator that walks the Merkle tree
/// from the leaf to the root. The iterator carries the slice of the chunk/node as well as the
/// offset of the child node's hash. It is up to the iterator user to use the node and hash,
/// e.g. for the actual verification.
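///
/// For example (an illustrative sketch, with sizes derived from the 4K chunks and 32-byte
/// SHA-256 hashes used below, i.e. 128 hashes per node): for a 2 MiB file (512 data chunks),
/// the tree stores the root node at tree chunk 0, followed by 4 leaf nodes at tree chunks
/// 1..=4. Walking for data chunk 200 then yields two steps in leaf-to-root order: the leaf
/// node at tree chunk 2 with hash offset (200 % 128) * 32 = 2304, then the root node at tree
/// chunk 0 with hash offset (200 / 128) * 32 = 32.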
#[allow(clippy::needless_collect)]
fn fsverity_walk<T: ReadByChunk>(
    chunk_index: u64,
    file_size: u64,
    merkle_tree: &T,
) -> Result<impl Iterator<Item = Result<([u8; 4096], usize), FsverityError>> + '_, FsverityError> {
    let hashes_per_node = CHUNK_SIZE / Sha256Hasher::HASH_SIZE as u64;
    debug_assert_eq!(hashes_per_node, 128u64);
    let max_level = merkle_tree_height(file_size).expect("file should not be empty") as u32;
    let root_to_leaf_steps = (0..=max_level)
        .rev()
        .map(|x| {
            let leaves_per_hash = hashes_per_node.pow(x);
            let leaves_size_per_hash = CHUNK_SIZE * leaves_per_hash;
            let leaves_size_per_node = leaves_size_per_hash * hashes_per_node;
            let nodes_at_level = divide_roundup(file_size, leaves_size_per_node);
            let level_size = nodes_at_level * CHUNK_SIZE;
            let offset_in_level = (chunk_index / leaves_per_hash) * Sha256Hasher::HASH_SIZE as u64;
            (level_size, offset_in_level)
        })
        .scan(0, |level_offset, (level_size, offset_in_level)| {
            let this_level_offset = *level_offset;
            *level_offset += level_size;
            let global_hash_offset = this_level_offset + offset_in_level;
            Some(global_hash_offset)
        })
        .map(|global_hash_offset| {
            let chunk_index = global_hash_offset / CHUNK_SIZE;
            let hash_offset_in_chunk = (global_hash_offset % CHUNK_SIZE) as usize;
            (chunk_index, hash_offset_in_chunk)
        })
        .collect::<Vec<_>>(); // Needs to collect first to be able to reverse below.

    Ok(root_to_leaf_steps.into_iter().rev().map(move |(chunk_index, hash_offset_in_chunk)| {
        let mut merkle_chunk = [0u8; 4096];
        // read_chunk is supposed to return a full chunk, or an incomplete one at the end of the
        // file. In the incomplete case, the hash is calculated with 0-padding to the chunk size.
        // Therefore, we don't need to check the returned size here.
        let _ = merkle_tree.read_chunk(chunk_index, &mut merkle_chunk)?;
        Ok((merkle_chunk, hash_offset_in_chunk))
    }))
}

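/// A `ReadByChunk` implementation that verifies each chunk read from the underlying file
/// against the fs-verity Merkle tree, and ultimately against the root hash established at
/// construction time.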
pub struct VerifiedFileReader<F: ReadByChunk, M: ReadByChunk> {
    pub file_size: u64,
    chunked_file: F,
    merkle_tree: M,
    root_hash: HashBuffer,
}

impl<F: ReadByChunk, M: ReadByChunk> VerifiedFileReader<F, M> {
    pub fn new(
        chunked_file: F,
        file_size: u64,
        expected_digest: &[u8],
        merkle_tree: M,
    ) -> Result<VerifiedFileReader<F, M>, FsverityError> {
        let mut buf = [0u8; CHUNK_SIZE as usize];
        if file_size <= CHUNK_SIZE {
            let _size = chunked_file.read_chunk(0, &mut buf)?;
            // The rest of the buffer is 0-padded.
        } else {
            let size = merkle_tree.read_chunk(0, &mut buf)?;
            if buf.len() != size {
                return Err(FsverityError::InsufficientData(size));
            }
        }
        let root_hash = Sha256Hasher::new()?.update(&buf[..])?.finalize()?;
        if expected_digest == build_fsverity_digest(&root_hash, file_size)? {
            // Once verified, use the root_hash for verification going forward.
            Ok(VerifiedFileReader { chunked_file, file_size, merkle_tree, root_hash })
        } else {
            Err(FsverityError::InvalidDigest)
        }
    }
}

impl<F: ReadByChunk, M: ReadByChunk> ReadByChunk for VerifiedFileReader<F, M> {
    fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
        let size = self.chunked_file.read_chunk(chunk_index, buf)?;
        let root_hash = verity_check(&buf[..size], chunk_index, self.file_size, &self.merkle_tree)
            .map_err(|_| io::Error::from_raw_os_error(EIO))?;
        if root_hash != self.root_hash {
            Err(io::Error::from_raw_os_error(EIO))
        } else {
            Ok(size)
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::file::ReadByChunk;
    use anyhow::Result;
    use authfs_fsverity_metadata::{parse_fsverity_metadata, FSVerityMetadata};
    use std::cmp::min;
    use std::fs::File;
    use std::os::unix::fs::FileExt;

    struct LocalFileReader {
        file: File,
        size: u64,
    }

    impl LocalFileReader {
        fn new(file: File) -> io::Result<LocalFileReader> {
            let size = file.metadata()?.len();
            Ok(LocalFileReader { file, size })
        }

        fn len(&self) -> u64 {
            self.size
        }
    }

    impl ReadByChunk for LocalFileReader {
        fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
            let start = chunk_index * CHUNK_SIZE;
            if start >= self.size {
                return Ok(0);
            }
            let end = min(self.size, start + CHUNK_SIZE);
            let read_size = (end - start) as usize;
            debug_assert!(read_size <= buf.len());
            self.file.read_exact_at(&mut buf[..read_size], start)?;
            Ok(read_size)
        }
    }

    type LocalVerifiedFileReader = VerifiedFileReader<LocalFileReader, MerkleTreeReader>;

    pub struct MerkleTreeReader {
        metadata: Box<FSVerityMetadata>,
    }

    impl ReadByChunk for MerkleTreeReader {
        fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
            self.metadata.read_merkle_tree(chunk_index * CHUNK_SIZE, buf)
        }
    }

    fn total_chunk_number(file_size: u64) -> u64 {
        (file_size + 4095) / 4096
    }

    // Returns a reader with fs-verity verification and the file size.
    fn new_reader_with_fsverity(
        content_path: &str,
        metadata_path: &str,
    ) -> Result<(LocalVerifiedFileReader, u64)> {
        let file_reader = LocalFileReader::new(File::open(content_path)?)?;
        let file_size = file_reader.len();
        let metadata = parse_fsverity_metadata(File::open(metadata_path)?)?;
        Ok((
            VerifiedFileReader::new(
                file_reader,
                file_size,
                &metadata.digest.clone(),
                MerkleTreeReader { metadata },
            )?,
            file_size,
        ))
    }

    #[test]
    fn fsverity_verify_full_read_4k() -> Result<()> {
        let (file_reader, file_size) =
            new_reader_with_fsverity("testdata/input.4k", "testdata/input.4k.fsv_meta")?;

        for i in 0..total_chunk_number(file_size) {
            let mut buf = [0u8; 4096];
            assert!(file_reader.read_chunk(i, &mut buf).is_ok());
        }
        Ok(())
    }

    #[test]
    fn fsverity_verify_full_read_4k1() -> Result<()> {
        let (file_reader, file_size) =
            new_reader_with_fsverity("testdata/input.4k1", "testdata/input.4k1.fsv_meta")?;

        for i in 0..total_chunk_number(file_size) {
            let mut buf = [0u8; 4096];
            assert!(file_reader.read_chunk(i, &mut buf).is_ok());
        }
        Ok(())
    }

    #[test]
    fn fsverity_verify_full_read_4m() -> Result<()> {
        let (file_reader, file_size) =
            new_reader_with_fsverity("testdata/input.4m", "testdata/input.4m.fsv_meta")?;

        for i in 0..total_chunk_number(file_size) {
            let mut buf = [0u8; 4096];
            assert!(file_reader.read_chunk(i, &mut buf).is_ok());
        }
        Ok(())
    }

    #[test]
    fn fsverity_verify_bad_merkle_tree() -> Result<()> {
        let (file_reader, _) = new_reader_with_fsverity(
            "testdata/input.4m",
            "testdata/input.4m.fsv_meta.bad_merkle", // First leaf node is corrupted.
        )?;

        // The lowest broken node (a 4K chunk that contains 128 sha256 hashes) causes read
        // failures for all the data chunks it covers, but not for those before or after it.
        let mut buf = [0u8; 4096];
        let num_hashes = 4096 / 32;
        let last_index = num_hashes;
        for i in 0..last_index {
            assert!(file_reader.read_chunk(i, &mut buf).is_err());
        }
        assert!(file_reader.read_chunk(last_index, &mut buf).is_ok());
        Ok(())
    }
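
    #[test]
    fn hash_with_padding_matches_prepadded_buffer() {
        // A minimal sanity-check sketch, not part of the original suite: hashing a short
        // chunk with implicit 0-padding should equal hashing a buffer that is already
        // padded out to the full CHUNK_SIZE.
        let data = b"hello";
        let mut padded = [0u8; CHUNK_SIZE as usize];
        padded[..data.len()].copy_from_slice(data);
        let expected =
            Sha256Hasher::new().unwrap().update(&padded[..]).unwrap().finalize().unwrap();
        assert_eq!(hash_with_padding(data, CHUNK_SIZE as usize).unwrap(), expected);
    }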
}