• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2024, The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //     http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 use crate::{
16     as_uninit, check_range, is_aligned, is_buffer_aligned, BlockInfo, BlockIo, SliceMaybeUninit,
17 };
18 use core::cmp::min;
19 use liberror::Result;
20 use libutils::aligned_subslice;
21 use safemath::SafeNum;
22 
23 /// Reads from a range at block boundary to an aligned buffer.
read_aligned_all( io: &mut impl BlockIo, offset: u64, out: &mut (impl SliceMaybeUninit + ?Sized), ) -> Result<()>24 async fn read_aligned_all(
25     io: &mut impl BlockIo,
26     offset: u64,
27     out: &mut (impl SliceMaybeUninit + ?Sized),
28 ) -> Result<()> {
29     let blk_offset = check_range(io.info(), offset, out.as_ref())?.try_into()?;
30     Ok(io.read_blocks(blk_offset, out).await?)
31 }
32 
/// Read with block-aligned offset and aligned buffer. Size doesn't need to be block aligned.
///   |~~~~~~~~~read~~~~~~~~~|
///   |---------|---------|---------|
async fn read_aligned_offset_and_buffer(
    io: &mut impl BlockIo,
    offset: u64,
    out: &mut (impl SliceMaybeUninit + ?Sized),
    scratch: &mut [u8],
) -> Result<()> {
    let block_size = SafeNum::from(io.info().block_size);
    debug_assert!(is_aligned(offset, block_size)?);
    debug_assert!(is_buffer_aligned(out.as_ref(), io.info().alignment)?);

    // Number of bytes that form whole blocks and can be read directly into `out`.
    let aligned_read: usize = SafeNum::from(out.len()).round_down(block_size).try_into()?;

    if aligned_read > 0 {
        read_aligned_all(io, offset, out.get_mut(..aligned_read)?).await?;
    }
    let unaligned = out.get_mut(aligned_read..)?;
    if unaligned.is_empty() {
        return Ok(());
    }
    // Read unaligned part: fetch one full block into `scratch` and copy out only
    // the prefix the caller asked for.
    let block_scratch = &mut scratch[..block_size.try_into()?];
    let aligned_offset = SafeNum::from(offset) + aligned_read;
    read_aligned_all(io, aligned_offset.try_into()?, block_scratch).await?;
    unaligned.clone_from_slice(as_uninit(&block_scratch[..unaligned.len()]));
    Ok(())
}
62 
/// Read with aligned buffer. Offset and size don't need to be block aligned.
/// Case 1:
///            |~~~~~~read~~~~~~~|
///        |------------|------------|
/// Case 2:
///          |~~~read~~~|
///        |---------------|--------------|
async fn read_aligned_buffer(
    io: &mut impl BlockIo,
    offset: u64,
    out: &mut (impl SliceMaybeUninit + ?Sized),
    scratch: &mut [u8],
) -> Result<()> {
    debug_assert!(is_buffer_aligned(out.as_ref(), io.info().alignment)?);

    // Block-aligned offset: delegate to the simpler helper.
    if is_aligned(offset, io.info().block_size)? {
        return read_aligned_offset_and_buffer(io, offset, out, scratch).await;
    }
    let offset = SafeNum::from(offset);
    // First block boundary at or after `offset`, clamped to the end of the read
    // range (the clamp covers Case 2, where the whole read sits inside one block).
    let aligned_start: u64 =
        min(offset.round_up(io.info().block_size).try_into()?, (offset + out.len()).try_into()?);

    // Position of `aligned_start` relative to the beginning of `out`.
    let aligned_relative_offset: usize = (SafeNum::from(aligned_start) - offset).try_into()?;
    if aligned_relative_offset < out.len() {
        if is_buffer_aligned(&out.get(aligned_relative_offset..)?, io.info().alignment)? {
            // If new output address is aligned, read directly.
            read_aligned_offset_and_buffer(
                io,
                aligned_start,
                out.get_mut(aligned_relative_offset..)?,
                scratch,
            )
            .await?;
        } else {
            // Otherwise read into `out` (assumed aligned) and memmove to the correct
            // position
            let read_len: usize =
                (SafeNum::from(out.len()) - aligned_relative_offset).try_into()?;
            read_aligned_offset_and_buffer(io, aligned_start, out.get_mut(..read_len)?, scratch)
                .await?;
            out.as_mut().copy_within(..read_len, aligned_relative_offset);
        }
    }

    // Now read the unaligned part: one full block into `scratch`, then copy the
    // bytes between `offset` and the first block boundary into the head of `out`.
    let block_scratch = &mut scratch[..SafeNum::from(io.info().block_size).try_into()?];
    let round_down_offset = offset.round_down(io.info().block_size);
    read_aligned_all(io, round_down_offset.try_into()?, block_scratch).await?;
    let offset_relative = offset - round_down_offset;
    let unaligned = out.get_mut(..aligned_relative_offset)?;
    unaligned.clone_from_slice(as_uninit(
        &block_scratch
            [offset_relative.try_into()?..(offset_relative + unaligned.len()).try_into()?],
    ));
    Ok(())
}
119 
120 // Partition a scratch into two aligned parts: [u8; alignment()-1] and [u8; block_size())]
121 // for handling block and buffer misalignment respecitvely.
split_scratch<'a>( info: BlockInfo, scratch: &'a mut [u8], ) -> Result<(&'a mut [u8], &'a mut [u8])>122 fn split_scratch<'a>(
123     info: BlockInfo,
124     scratch: &'a mut [u8],
125 ) -> Result<(&'a mut [u8], &'a mut [u8])> {
126     let (buffer_alignment, block_alignment) = aligned_subslice(scratch, info.alignment)?
127         .split_at_mut((SafeNum::from(info.alignment) - 1).try_into()?);
128     let block_alignment = aligned_subslice(block_alignment, info.alignment)?;
129     let block_alignment_scratch_size = match info.block_size {
130         1 => SafeNum::ZERO,
131         v => v.into(),
132     };
133     Ok((buffer_alignment, &mut block_alignment[..block_alignment_scratch_size.try_into()?]))
134 }
135 
/// Read with no alignment requirement.
///
/// `scratch` is split by [`split_scratch`] into a buffer-misalignment part and a
/// block-misalignment part; it must be large enough for that split to succeed.
pub async fn read_async(
    io: &mut impl BlockIo,
    offset: u64,
    out: &mut (impl SliceMaybeUninit + ?Sized),
    scratch: &mut [u8],
) -> Result<()> {
    let (buffer_alignment_scratch, block_alignment_scratch) = split_scratch(io.info(), scratch)?;

    // Buffer already aligned: no staging copy needed.
    if is_buffer_aligned(out.as_ref(), io.info().alignment)? {
        return read_aligned_buffer(io, offset, out, block_alignment_scratch).await;
    }

    // Buffer misalignment:
    // Case 1:
    //     |~~~~~~~~~~~~buffer~~~~~~~~~~~~|
    //   |----------------------|---------------------|
    //      io.info().alignment
    //
    // Case 2:
    //    |~~~~~~buffer~~~~~|
    //  |----------------------|---------------------|
    //     io.info().alignment

    // Number of bytes from the start of `out` up to the next address aligned to
    // `io.info().alignment`, capped at `out.len()` (Case 2).
    let out_addr_value = SafeNum::from(out.as_mut().as_ptr() as usize);
    let unaligned_read: usize =
        min((out_addr_value.round_up(io.info().alignment) - out_addr_value).try_into()?, out.len());

    // Read unaligned part: stage in the aligned scratch, then copy into `out`.
    let unaligned_out = &mut buffer_alignment_scratch[..unaligned_read];
    read_aligned_buffer(io, offset, unaligned_out, block_alignment_scratch).await?;
    out.get_mut(..unaligned_read)?.clone_from_slice(as_uninit(unaligned_out));

    if unaligned_read == out.len() {
        return Ok(());
    }
    // Read aligned part
    read_aligned_buffer(
        io,
        (SafeNum::from(offset) + unaligned_read).try_into()?,
        out.get_mut(unaligned_read..)?,
        block_alignment_scratch,
    )
    .await
}
181 
182 /// Write bytes from aligned buffer to a block boundary range.
write_aligned_all(io: &mut impl BlockIo, offset: u64, data: &mut [u8]) -> Result<()>183 async fn write_aligned_all(io: &mut impl BlockIo, offset: u64, data: &mut [u8]) -> Result<()> {
184     let blk_offset = check_range(io.info(), offset, data)?.try_into()?;
185     Ok(io.write_blocks(blk_offset, data).await?)
186 }
187 
188 /// Write with block-aligned offset and aligned buffer. `data.len()` can be unaligned.
189 ///   |~~~~~~~~~size~~~~~~~~~|
190 ///   |---------|---------|---------|
write_aligned_offset_and_buffer( io: &mut impl BlockIo, offset: u64, data: &mut [u8], scratch: &mut [u8], ) -> Result<()>191 async fn write_aligned_offset_and_buffer(
192     io: &mut impl BlockIo,
193     offset: u64,
194     data: &mut [u8],
195     scratch: &mut [u8],
196 ) -> Result<()> {
197     debug_assert!(is_aligned(offset, io.info().block_size)?);
198     debug_assert!(is_buffer_aligned(data, io.info().alignment)?);
199 
200     let aligned_write: usize =
201         SafeNum::from(data.len()).round_down(io.info().block_size).try_into()?;
202     if aligned_write > 0 {
203         write_aligned_all(io, offset, &mut data[..aligned_write]).await?;
204     }
205     let unaligned = &data[aligned_write..];
206     if unaligned.len() == 0 {
207         return Ok(());
208     }
209 
210     // Perform read-modify-write for the unaligned part
211     let unaligned_start: u64 = (SafeNum::from(offset) + aligned_write).try_into()?;
212     let block_scratch = &mut scratch[..SafeNum::from(io.info().block_size).try_into()?];
213     read_aligned_all(io, unaligned_start, block_scratch).await?;
214     block_scratch[..unaligned.len()].clone_from_slice(unaligned);
215     write_aligned_all(io, unaligned_start, block_scratch).await
216 }
217 
// Rotates buffer content to the left by `sz` bytes.
//
// Uses `core::slice::rotate_left` instead of a hand-rolled copy through
// `scratch`: it is in-place, and it removes the previous panic hazard when
// `scratch.len() < sz`. The scratch parameter is kept (unused) for signature
// compatibility with callers. Panics if `sz > slice.len()`, as before.
fn rotate_left(slice: &mut [u8], sz: usize, _scratch: &mut [u8]) {
    slice.rotate_left(sz);
}
225 
// Rotates buffer content to the right by `sz` bytes.
//
// Uses `core::slice::rotate_right` instead of a hand-rolled copy through
// `scratch`: it is in-place, and it removes the previous panic hazard when
// `scratch.len() < sz`. The scratch parameter is kept (unused) for signature
// compatibility with callers. Panics if `sz > slice.len()`, as before.
fn rotate_right(slice: &mut [u8], sz: usize, _scratch: &mut [u8]) {
    slice.rotate_right(sz);
}
233 
/// Write with aligned buffer. Offset and size don't need to be block aligned.
/// Case 1:
///            |~~~~~~write~~~~~~~|
///        |------------|------------|
/// Case 2:
///          |~~~write~~~|
///        |---------------|--------------|
async fn write_aligned_buffer(
    io: &mut impl BlockIo,
    offset: u64,
    data: &mut [u8],
    scratch: &mut [u8],
) -> Result<()> {
    debug_assert!(is_buffer_aligned(data, io.info().alignment)?);

    let offset = SafeNum::from(offset);
    // Block-aligned offset: delegate to the simpler helper.
    if is_aligned(offset, io.info().block_size)? {
        return write_aligned_offset_and_buffer(io, offset.try_into()?, data, scratch).await;
    }

    // First block boundary at or after `offset`, clamped to the end of the
    // write range (the clamp covers Case 2, a write inside a single block).
    let aligned_start: u64 =
        min(offset.round_up(io.info().block_size).try_into()?, (offset + data.len()).try_into()?);
    // Position of `aligned_start` relative to the beginning of `data`.
    let aligned_relative_offset: usize = (SafeNum::from(aligned_start) - offset).try_into()?;
    if aligned_relative_offset < data.len() {
        if is_buffer_aligned(&data[aligned_relative_offset..], io.info().alignment)? {
            // If new address is aligned, write directly.
            write_aligned_offset_and_buffer(
                io,
                aligned_start,
                &mut data[aligned_relative_offset..],
                scratch,
            )
            .await?;
        } else {
            let write_len: usize =
                (SafeNum::from(data.len()) - aligned_relative_offset).try_into()?;
            // Swap the offset-aligned part to the beginning of the buffer (assumed aligned)
            rotate_left(data, aligned_relative_offset, scratch);
            let res =
                write_aligned_offset_and_buffer(io, aligned_start, &mut data[..write_len], scratch)
                    .await;
            // Swap the two parts back before checking the result, so `data` is
            // restored to the caller's layout even on error.
            rotate_right(data, aligned_relative_offset, scratch);
            res?;
        }
    }

    // perform read-modify-write for the unaligned part: read the containing
    // block, patch the head bytes of `data` into it, and write it back.
    let block_scratch = &mut scratch[..SafeNum::from(io.info().block_size).try_into()?];
    let round_down_offset: u64 = offset.round_down(io.info().block_size).try_into()?;
    read_aligned_all(io, round_down_offset, block_scratch).await?;
    let offset_relative = offset - round_down_offset;
    block_scratch
        [offset_relative.try_into()?..(offset_relative + aligned_relative_offset).try_into()?]
        .clone_from_slice(&data[..aligned_relative_offset]);
    write_aligned_all(io, round_down_offset, block_scratch).await
}
291 
/// Writes bytes to the block device.
/// It does internal optimization that temporarily modifies `data` layout to minimize number of
/// calls to `io.read_blocks()`/`io.write_blocks()` (down to O(1)).
pub async fn write_async(
    io: &mut impl BlockIo,
    offset: u64,
    data: &mut [u8],
    scratch: &mut [u8],
) -> Result<()> {
    let (buffer_alignment_scratch, block_alignment_scratch) = split_scratch(io.info(), scratch)?;
    // Buffer already aligned: no staging copy needed.
    if is_buffer_aligned(data, io.info().alignment)? {
        return write_aligned_buffer(io, offset, data, block_alignment_scratch).await;
    }

    // Buffer misalignment:
    // Case 1:
    //     |~~~~~~~~~~~~buffer~~~~~~~~~~~~|
    //   |----------------------|---------------------|
    //      io.alignment()
    //
    // Case 2:
    //    |~~~~~~buffer~~~~~|
    //  |----------------------|---------------------|
    //     io.alignment()

    // Write unaligned part: number of bytes from the start of `data` up to the
    // next address aligned to `io.info().alignment`, capped at `data.len()`
    // (Case 2).
    let data_addr_value = SafeNum::from(data.as_ptr() as usize);
    let unaligned_write: usize = min(
        (data_addr_value.round_up(io.info().alignment) - data_addr_value).try_into()?,
        data.len(),
    );
    // Stage the unaligned head in the aligned scratch, then write it out.
    // NOTE: `&mut unaligned_data` below is `&mut &mut [u8]`; it coerces to
    // `&mut [u8]` via deref coercion, hence the `mut` binding.
    let mut unaligned_data = &mut buffer_alignment_scratch[..unaligned_write];
    unaligned_data.clone_from_slice(&data[..unaligned_write]);
    write_aligned_buffer(io, offset, &mut unaligned_data, block_alignment_scratch).await?;
    if unaligned_write == data.len() {
        return Ok(());
    }

    // Write aligned part
    write_aligned_buffer(
        io,
        (SafeNum::from(offset) + unaligned_write).try_into()?,
        &mut data[unaligned_write..],
        block_alignment_scratch,
    )
    .await
}
339