/packages/modules/Virtualization/authfs/src/fsverity/ |
D | editor.rs |
      75  debug_assert!(usize::MAX as u64 == u64::MAX, "Only 64-bit arch is supported");  in debug_assert_usize_is_u64()
     101  offset_from_alignment: usize,  in new_hash_for_incomplete_write() argument
     102  output_chunk_index: usize,  in new_hash_for_incomplete_write() argument
     108  let mut orig_data = [0u8; CHUNK_SIZE as usize];  in new_hash_for_incomplete_write()
     132  offset_from_alignment: usize,  in new_chunk_hash() argument
     133  current_size: usize,  in new_chunk_hash() argument
     134  output_chunk_index: usize,  in new_chunk_hash() argument
     159  fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {  in write_at() argument
     173  ChunkedSizeIter::new(buf.len(), offset, CHUNK_SIZE as usize)  in write_at()
     179  let offset_in_buf = (output_offset - offset) as usize;  in write_at()
     [all …]
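Note: lines 101-108 handle a write that does not cover a whole 4096-byte chunk: the original chunk is read back, the new bytes are overlaid at the in-chunk offset, and the merged chunk is re-hashed. A minimal sketch of just that merge step, assuming CHUNK_SIZE = 4096 as elsewhere in authfs; the function name merge_into_chunk is illustrative, and the real new_hash_for_incomplete_write() also reads the original chunk from the backing file and hashes the result into the Merkle tree.

    const CHUNK_SIZE: u64 = 4096;

    /// Overlay `new_data` onto the current contents of a chunk at byte
    /// `offset_from_alignment`, returning the merged chunk that would then be hashed.
    fn merge_into_chunk(
        orig_chunk: &[u8; CHUNK_SIZE as usize],
        new_data: &[u8],
        offset_from_alignment: usize,
    ) -> [u8; CHUNK_SIZE as usize] {
        assert!(offset_from_alignment + new_data.len() <= CHUNK_SIZE as usize);
        let mut merged = *orig_chunk;
        merged[offset_from_alignment..offset_from_alignment + new_data.len()]
            .copy_from_slice(new_data);
        merged
    }

    fn main() {
        let orig = [0u8; CHUNK_SIZE as usize];
        let merged = merge_into_chunk(&orig, b"hello", 100);
        assert_eq!(&merged[100..105], b"hello");
    }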
|
D | builder.rs |
      21  const HASH_SIZE: usize = Sha256Hasher::HASH_SIZE;
      22  const HASH_PER_PAGE: usize = CHUNK_SIZE as usize / HASH_SIZE;
      64  pub fn resize(&mut self, new_file_size: usize) {  in resize() argument
      67  self.leaves.resize(leaves_size as usize, Sha256Hasher::HASH_OF_4096_ZEROS);  in resize()
      73  pub fn update_hash(&mut self, index: usize, hash: &Sha256Hash, size_at_least: u64) {  in update_hash() argument
      88  pub fn is_index_valid(&self, index: usize) -> bool {  in is_index_valid()
      93  pub fn is_consistent(&self, index: usize, hash: &Sha256Hash) -> bool {  in is_consistent() argument
     197  let hash = Sha256Hasher::new()?.update(&vec![1u8; CHUNK_SIZE as usize])?.finalize()?;  in merkle_tree_non_sequential()
     236  tree.resize(CHUNK_SIZE as usize * 2 - 100);  in merkle_tree_shrink_leaves()
     249  for (index, chunk) in test_data.chunks(CHUNK_SIZE as usize).enumerate() {  in generate_fsverity_digest_sequentially()
     [all …]
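Note: resize() at lines 64-67 grows or shrinks the per-chunk hash list to match a new file size, padding new leaves with the hash of an all-zero 4096-byte chunk. The sketch below shows only that leaf bookkeeping under those assumptions; it uses the external sha2 crate in place of the module's Sha256Hasher, and it drops the size_at_least argument that the real update_hash() takes.

    use sha2::{Digest, Sha256};

    const CHUNK_SIZE: u64 = 4096;
    const HASH_SIZE: usize = 32; // SHA-256 output size, matching Sha256Hasher::HASH_SIZE

    type Sha256Hash = [u8; HASH_SIZE];

    /// Per-chunk hashes of a file, one leaf per 4096-byte chunk. Sketch only; the
    /// real MerkleLeaves also tracks the file size and rebuilds the upper tree levels.
    struct MerkleLeaves {
        leaves: Vec<Sha256Hash>,
    }

    impl MerkleLeaves {
        fn hash_of_zero_chunk() -> Sha256Hash {
            Sha256::digest(&[0u8; CHUNK_SIZE as usize]).into()
        }

        /// Number of leaves needed for `file_size` bytes: ceil(file_size / CHUNK_SIZE).
        fn leaf_count(file_size: usize) -> usize {
            (file_size + CHUNK_SIZE as usize - 1) / CHUNK_SIZE as usize
        }

        pub fn resize(&mut self, new_file_size: usize) {
            let leaves_size = Self::leaf_count(new_file_size);
            // New leaves start as the hash of an all-zero chunk (unwritten data reads as zeros).
            self.leaves.resize(leaves_size, Self::hash_of_zero_chunk());
        }

        pub fn is_index_valid(&self, index: usize) -> bool {
            index < self.leaves.len()
        }

        pub fn update_hash(&mut self, index: usize, hash: &Sha256Hash) {
            self.leaves[index] = *hash;
        }
    }

    fn main() {
        let mut tree = MerkleLeaves { leaves: Vec::new() };
        tree.resize(CHUNK_SIZE as usize * 2 - 100); // just under two chunks -> 2 leaves
        assert_eq!(tree.leaves.len(), 2);
        assert!(tree.is_index_valid(1));
        assert!(!tree.is_index_valid(2));
    }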
|
D | verifier.rs |
      27  const ZEROS: [u8; CHUNK_SIZE as usize] = [0u8; CHUNK_SIZE as usize];
      30  const SIZE_OF_FSVERITY_FORMATTED_DIGEST_SHA256: usize = 12 + Sha256Hasher::HASH_SIZE;
      34  fn hash_with_padding(chunk: &[u8], pad_to: usize) -> Result<HashBuffer, CryptoError> {  in hash_with_padding()
      50  let chunk_hash = hash_with_padding(&chunk, CHUNK_SIZE as usize)?;  in verity_check()
      61  Ok(hash_with_padding(&merkle_chunk, CHUNK_SIZE as usize)?)  in verity_check()
      75  ) -> Result<impl Iterator<Item = Result<([u8; 4096], usize), FsverityError>> + '_, FsverityError> {  in fsverity_walk() argument
      98  let hash_offset_in_chunk = (global_hash_offset % CHUNK_SIZE) as usize;
     143  let mut buf = [0u8; CHUNK_SIZE as usize];  in new()
     160  fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {  in read_chunk() argument
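Note: hash_with_padding() at line 34 hashes a chunk as if it were zero-padded to the full 4096-byte block size, which is how fs-verity treats a short final block. A sketch under that assumption, substituting the sha2 crate and a plain [u8; 32] return type for the module's Sha256Hasher, HashBuffer and CryptoError:

    use sha2::{Digest, Sha256};

    /// Hash `chunk` as if it were padded with zeros up to `pad_to` bytes.
    fn hash_with_padding(chunk: &[u8], pad_to: usize) -> [u8; 32] {
        assert!(chunk.len() <= pad_to);
        let mut hasher = Sha256::new();
        hasher.update(chunk);
        // Feed the zero padding explicitly; only the padding is allocated,
        // not a padded copy of the whole chunk.
        hasher.update(&vec![0u8; pad_to - chunk.len()]);
        hasher.finalize().into()
    }

    fn main() {
        const CHUNK_SIZE: usize = 4096;
        // A short chunk hashes the same as its explicitly zero-padded form.
        let mut padded = vec![1u8; 10];
        padded.resize(CHUNK_SIZE, 0);
        assert_eq!(
            hash_with_padding(&[1u8; 10], CHUNK_SIZE).to_vec(),
            Sha256::digest(&padded).to_vec()
        );
    }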
|
D | common.rs | 30 InsufficientData(usize),
|
/packages/modules/Virtualization/authfs/src/ |
D | common.rs |
      27  remaining: usize,
      29  alignment: usize,
      33  pub fn new(remaining: usize, offset: u64, alignment: usize) -> Self {  in new() argument
      39  type Item = (u64, usize);
      47  self.alignment - (self.offset % self.alignment as u64) as usize,  in next()
      60  fn collect_chunk_read_iter(remaining: usize, offset: u64) -> Vec<(u64, usize)> {  in collect_chunk_read_iter() argument
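Note: the lines above suggest that ChunkedSizeIter::new(remaining, offset, alignment) yields (offset, length) pairs whose pieces never cross an alignment boundary, using the arithmetic at line 47. A standalone sketch of such an iterator, inferred from these signatures rather than copied from the module:

    /// Splits the byte range [offset, offset + remaining) into pieces that never
    /// cross an `alignment` boundary.
    pub struct ChunkedSizeIter {
        remaining: usize,
        offset: u64,
        alignment: usize,
    }

    impl ChunkedSizeIter {
        pub fn new(remaining: usize, offset: u64, alignment: usize) -> Self {
            ChunkedSizeIter { remaining, offset, alignment }
        }
    }

    impl Iterator for ChunkedSizeIter {
        // (start offset, length) of the next aligned piece.
        type Item = (u64, usize);

        fn next(&mut self) -> Option<Self::Item> {
            if self.remaining == 0 {
                return None;
            }
            // Distance to the next alignment boundary, capped by the bytes remaining.
            let to_boundary = self.alignment - (self.offset % self.alignment as u64) as usize;
            let len = std::cmp::min(self.remaining, to_boundary);
            let item = (self.offset, len);
            self.offset += len as u64;
            self.remaining -= len;
            Some(item)
        }
    }

    fn main() {
        // A 10-byte range starting at offset 4090 with 4096-byte alignment splits
        // into (4090, 6) and (4096, 4).
        let pieces: Vec<(u64, usize)> = ChunkedSizeIter::new(10, 4090, 4096).collect();
        assert_eq!(pieces, vec![(4090, 6), (4096, 4)]);
    }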
|
D | fusefs.rs |
     148  ) -> io::Result<usize> {  in read_chunks() argument
     150  let size_to_read = std::cmp::min(size as usize, remaining as usize);  in read_chunks()
     151  let total = ChunkedSizeIter::new(size_to_read, offset, CHUNK_SIZE as usize).try_fold(  in read_chunks()
     158  let mut buf = [0u8; CHUNK_SIZE as usize];  in read_chunks()
     164  let begin = (current_offset % CHUNK_SIZE) as usize;  in read_chunks()
     286  ) -> io::Result<usize> {  in read() argument
     319  ) -> io::Result<usize> {  in write() argument
     322  let mut buf = vec![0; size as usize];  in write()
|
D | file.rs |
      20  pub type ChunkBuffer = [u8; CHUNK_SIZE as usize];
      28  fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize>;  in read_chunk() argument
      39  fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize>;  in write_at() argument
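Note: file.rs holds the two small interfaces the listings above plug into: chunk-granular reads into a fixed 4096-byte buffer, and writes at arbitrary offsets. A self-contained restatement of those signatures; the listing shows only the methods and the ChunkBuffer alias, so the trait names here (ReadByChunk, RandomWrite) and the trivial implementor are illustrative.

    use std::io;

    const CHUNK_SIZE: u64 = 4096;

    /// A fixed 4096-byte buffer, one chunk of a file (line 20).
    pub type ChunkBuffer = [u8; CHUNK_SIZE as usize];

    /// A file readable one 4096-byte chunk at a time (line 28).
    pub trait ReadByChunk {
        /// Reads chunk `chunk_index` into `buf`, returning the number of bytes read
        /// (less than CHUNK_SIZE only for the final, partial chunk).
        fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize>;
    }

    /// A file writable at arbitrary byte offsets (line 39).
    pub trait RandomWrite {
        /// Writes `buf` at `offset`, returning the number of bytes written.
        fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize>;
    }

    /// A trivial implementor, just to show the contract: every chunk reads as zeros.
    struct ZeroFile;

    impl ReadByChunk for ZeroFile {
        fn read_chunk(&self, _chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
            buf.fill(0);
            Ok(buf.len())
        }
    }

    fn main() -> io::Result<()> {
        let mut buf = [0u8; CHUNK_SIZE as usize];
        let read = ZeroFile.read_chunk(0, &mut buf)?;
        assert_eq!(read, CHUNK_SIZE as usize);
        Ok(())
    }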
|
D | crypto.rs | 36 pub const HASH_SIZE: usize = 32;
|
/packages/modules/Virtualization/authfs/src/file/ |
D | remote_file.rs |
      35  ) -> io::Result<usize> {  in remote_read_chunk() argument
      62  fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {  in read_chunk() argument
      81  fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {  in read_chunk() argument
     110  fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {  in write_at() argument
     119  Ok(size as usize) // within range because size is supposed to <= buf.len(), which is a usize  in write_at()
     135  fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {  in read_chunk() argument
|
D | local_file.rs |
      44  fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {  in read_chunk() argument
      50  let read_size = (end - start) as usize;  in read_chunk()
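Note: local_file.rs maps a chunk index to a plain positional read; the cast at line 50 exists because the last chunk of a file can be short. A sketch of that read path using std's FileExt, assuming a regular local file; the real LocalFileReader caches the file size instead of calling metadata() on every read, and the demo path in main() is arbitrary.

    use std::cmp::min;
    use std::fs::File;
    use std::io;
    use std::os::unix::fs::FileExt;

    const CHUNK_SIZE: u64 = 4096;
    type ChunkBuffer = [u8; CHUNK_SIZE as usize];

    /// Read chunk `chunk_index` of `file` into `buf`, returning the bytes read.
    fn read_chunk(file: &File, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
        let file_size = file.metadata()?.len();
        let start = chunk_index * CHUNK_SIZE;
        if start >= file_size {
            return Ok(0); // chunk lies entirely past EOF
        }
        let end = min(start + CHUNK_SIZE, file_size);
        let read_size = (end - start) as usize; // short only for the file's last chunk
        file.read_exact_at(&mut buf[..read_size], start)?;
        Ok(read_size)
    }

    fn main() -> io::Result<()> {
        let file = File::open("/etc/hostname")?; // any small readable file works as a demo
        let mut buf = [0u8; CHUNK_SIZE as usize];
        let n = read_chunk(&file, 0, &mut buf)?;
        println!("read {} bytes of chunk 0", n);
        Ok(())
    }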
|
/packages/modules/Virtualization/authfs/fd_server/src/ |
D | main.rs |
      62  fn validate_and_cast_size(size: i32) -> Result<usize, Status> {  in validate_and_cast_size() argument
     114  let size: usize = validate_and_cast_size(size)?;  in readFile()
     128  let size: usize = validate_and_cast_size(size)?;  in readFsverityMerkleTree()
     164  let size = MAX_REQUESTING_DATA as usize;  in readFsveritySignature()
     171  let mut buf = vec![0; MAX_REQUESTING_DATA as usize];  in readFsveritySignature()
     196  if buf.len() > i32::MAX as usize {  in writeFile()
     229  fn read_into_buf(file: &File, max_size: usize, offset: u64) -> io::Result<Vec<u8>> {  in read_into_buf() argument
     231  let buf_size = min(remaining, max_size as u64) as usize;  in read_into_buf()
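Note: fd_server receives sizes as i32 over Binder, so line 62 must reject negative values (and anything above the per-call cap) before the value is usable as a usize, and read_into_buf at lines 229-231 clamps the read to what is left of the file. A sketch of both, with a plain String error standing in for the real binder Status, a hypothetical MAX_REQUESTING_DATA value, and an arbitrary demo file in main():

    use std::cmp::min;
    use std::convert::TryFrom;
    use std::fs::File;
    use std::io;
    use std::os::unix::fs::FileExt;

    // Hypothetical per-call cap; the real constant lives in fd_server.
    const MAX_REQUESTING_DATA: i32 = 16384;

    /// Reject sizes that cannot become a valid usize request: negative values and
    /// anything above the cap.
    fn validate_and_cast_size(size: i32) -> Result<usize, String> {
        if size > MAX_REQUESTING_DATA {
            return Err(format!("Size too big: {}", size));
        }
        usize::try_from(size).map_err(|_| format!("Invalid size: {}", size))
    }

    /// Read up to `max_size` bytes at `offset`, clamped to the remaining file length.
    fn read_into_buf(file: &File, max_size: usize, offset: u64) -> io::Result<Vec<u8>> {
        let remaining = file.metadata()?.len().saturating_sub(offset);
        let buf_size = min(remaining, max_size as u64) as usize;
        let mut buf = vec![0; buf_size];
        file.read_exact_at(&mut buf, offset)?;
        Ok(buf)
    }

    fn main() -> io::Result<()> {
        assert_eq!(validate_and_cast_size(4096), Ok(4096));
        assert!(validate_and_cast_size(-1).is_err());
        let file = File::open("/etc/hostname")?;
        let data = read_into_buf(&file, 4096, 0)?;
        println!("read {} bytes", data.len());
        Ok(())
    }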
|
D | fsverity.rs |
      42  fn read_metadata(fd: i32, metadata_type: u64, offset: u64, buf: &mut [u8]) -> io::Result<usize> {  in read_metadata() argument
      59  })? as usize)  in read_metadata()
      64  pub fn read_merkle_tree(fd: i32, offset: u64, buf: &mut [u8]) -> io::Result<usize> {  in read_merkle_tree() argument
      69  pub fn read_signature(fd: i32, buf: &mut [u8]) -> io::Result<usize> {  in read_signature() argument
|
/packages/modules/Virtualization/zipfuse/src/ |
D | main.rs |
     210  let mut buf = Vec::with_capacity(inode_data.size as usize);  in open()
     254  ) -> io::Result<usize> {  in read() argument
     260  let start = offset as usize;  in read()
     261  let end = start + size as usize;  in read()
     327  let start = offset as usize;  in readdir()
     331  let mut estimate: usize = 0; // estimated number of bytes we will be writing  in readdir()
     333  while estimate < size as usize && end < buf.len() {  in readdir()
     351  cur: usize, // the current index in `inner`. `next` advances this.
     463  assert_eq!(content.len(), metadata.len() as usize);  in check_file()
     614  const NUM_FILES: usize = 1 << 10;  in large_dir()
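Note: zipfuse buffers a whole entry at open() (line 210) and serves read() by slicing that in-memory buffer; lines 260-261 show the offset and size being cast to usize for the slice bounds. A sketch of that slicing with the end clamped to the buffer length so reads at or past EOF return a short or empty result; the function name read_from_buffer is illustrative, and the real read() writes the slice into the FUSE reply instead of returning it.

    /// Serve a read out of a fully buffered file: the slice [offset, offset + size),
    /// clamped to the end of the buffer.
    fn read_from_buffer(content: &[u8], offset: u64, size: u32) -> &[u8] {
        let start = (offset as usize).min(content.len());
        let end = (start + size as usize).min(content.len());
        &content[start..end]
    }

    fn main() {
        let content = b"hello zipfuse";
        assert_eq!(read_from_buffer(content, 6, 7), &b"zipfuse"[..]);
        assert_eq!(read_from_buffer(content, 6, 100), &b"zipfuse"[..]); // clamped at EOF
        assert_eq!(read_from_buffer(content, 100, 4), &b""[..]);        // wholly past EOF
    }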
|
D | inode.rs |
      47  type ZipIndex = usize;
     123  _ => self.table.get(inode as usize),  in get()
     130  _ => self.table.get_mut(inode as usize),  in get_mut()
|
/packages/modules/DnsResolver/ |
D | doh.rs |
      51  const MAX_BUFFERED_CMD_SIZE: usize = 400;
      55  const MAX_DATAGRAM_SIZE: usize = 1350;
     469  if resp.len() > response_len || resp.len() > isize::MAX as usize {  in doh_query()
|
/packages/modules/Virtualization/authfs/ |
D | Android.bp | 42 bindgen_flags: ["--size_t-is-usize"],
|