// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Linux arm64 kernel loader.
//! <https://www.kernel.org/doc/Documentation/arm64/booting.txt>

use std::cmp::max;
use std::io;
use std::io::BufRead;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::mem::size_of_val;

use base::warn;
use base::FileGetLen;
use base::FileReadWriteAtVolatile;
use base::VolatileSlice;
use data_model::Le32;
use data_model::Le64;
use lz4_flex::frame::FrameDecoder as Lz4FrameDecoder;
use resources::AddressRange;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
use zerocopy::FromBytes;
use zerocopy::FromZeros;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

use crate::Error;
use crate::LoadedKernel;
use crate::Result;

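/// arm64 kernel image header, as defined by the arm64 boot protocol
/// (Documentation/arm64/booting.txt). All fields are little-endian and the
/// header occupies the first 64 bytes of the Image file.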
#[derive(Copy, Clone, FromBytes, Immutable, IntoBytes, KnownLayout)]
#[allow(unused)]
#[repr(C)]
struct Arm64ImageHeader {
    code0: Le32,
    code1: Le32,
    text_offset: Le64,
    image_size: Le64,
    flags: Le64,
    res2: Le64,
    res3: Le64,
    res4: Le64,
    magic: Le32,
    res5: Le32,
}

const ARM64_IMAGE_MAGIC: u32 = 0x644d5241; // "ARM\x64"

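// Bit 0 of the header `flags` field: 1 if the kernel was built big-endian, 0 if little-endian.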
const ARM64_IMAGE_FLAG_BE_MASK: u64 = 0x1;

const ARM64_TEXT_OFFSET_DEFAULT: u64 = 0x80000;

impl Arm64ImageHeader {
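    /// Validates the header magic and endianness flag, then returns the guest address
    /// (`kernel_start` + `text_offset`) at which the image should be loaded.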
    fn parse_load_addr(&self, kernel_start: GuestAddress) -> Result<GuestAddress> {
        let magic: u32 = self.magic.into();
        if magic != ARM64_IMAGE_MAGIC {
            return Err(Error::InvalidMagicNumber);
        }

        let flags: u64 = self.flags.into();
        if flags & ARM64_IMAGE_FLAG_BE_MASK != 0 {
            return Err(Error::BigEndianOnLittle);
        }

        let mut text_offset: u64 = self.text_offset.into();
        let image_size: u64 = self.image_size.into();

        if image_size == 0 {
            warn!("arm64 Image header has an effective size of zero");
            // arm64/booting.txt:
            // "Where image_size is zero, text_offset can be assumed to be 0x80000."
            text_offset = ARM64_TEXT_OFFSET_DEFAULT;
        }

        // Load the image into guest memory at `text_offset` bytes past `kernel_start`.
        kernel_start
            .checked_add(text_offset)
            .ok_or(Error::InvalidKernelOffset)
    }
}

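/// Loads an uncompressed arm64 `Image` format kernel from `kernel_image` into `guest_mem`,
/// placing it at the offset from `kernel_start` requested by the image header.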
pub fn load_arm64_kernel<F>(
    guest_mem: &GuestMemory,
    kernel_start: GuestAddress,
    kernel_image: &mut F,
) -> Result<LoadedKernel>
where
    F: FileReadWriteAtVolatile + FileGetLen,
{
    let mut header = Arm64ImageHeader::new_zeroed();
    kernel_image
        .read_exact_at_volatile(VolatileSlice::new(header.as_mut_bytes()), 0)
        .map_err(|_| Error::ReadHeader)?;
    let load_addr = header.parse_load_addr(kernel_start)?;

    let file_size = kernel_image.get_len().map_err(|_| Error::SeekKernelEnd)?;
    let load_size = usize::try_from(file_size).map_err(|_| Error::InvalidKernelSize)?;
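    // Reserve the larger of the file contents and the header's `image_size`; booting.txt
    // requires at least `image_size` bytes of free memory from the start of the image.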
    let range_size = max(file_size, u64::from(header.image_size));

    let guest_slice = guest_mem
        .get_slice_at_addr(load_addr, load_size)
        .map_err(|_| Error::ReadKernelImage)?;
    kernel_image
        .read_exact_at_volatile(guest_slice, 0)
        .map_err(|_| Error::ReadKernelImage)?;

    Ok(LoadedKernel {
        size: file_size,
        address_range: AddressRange::from_start_and_size(load_addr.offset(), range_size)
            .ok_or(Error::InvalidKernelSize)?,
        entry: load_addr,
    })
}

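/// Loads an arm64 kernel from a sequential reader (e.g. a decompression stream) that cannot
/// seek, copying it into `guest_mem` at the offset requested by the image header.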
fn load_arm64_kernel_from_reader<F: BufRead>(
    guest_mem: &GuestMemory,
    kernel_start: GuestAddress,
    mut kernel_image: F,
) -> Result<LoadedKernel> {
    let mut header = Arm64ImageHeader::new_zeroed();
    let header_size = u64::try_from(size_of_val(&header)).unwrap();

    // Read and parse the kernel header.
    kernel_image
        .read_exact(header.as_mut_bytes())
        .map_err(|_| Error::ReadHeader)?;
    let load_addr = header.parse_load_addr(kernel_start)?;

    // Write the parsed kernel header to memory. Avoid rewinding the reader back to the start.
    guest_mem
        .write_all_at_addr(header.as_bytes(), load_addr)
        .map_err(|_| Error::ReadKernelImage)?;

    // Continue reading from the source and copy the kernel image into GuestMemory.
    let mut current_addr = load_addr
        .checked_add(header_size)
        .ok_or(Error::InvalidKernelSize)?;
    loop {
        let buf = match kernel_image.fill_buf() {
            Ok([]) => break,
            Ok(buf) => buf,
            Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,
            Err(_) => return Err(Error::ReadKernelImage),
        };

        guest_mem
            .write_all_at_addr(buf, current_addr)
            .map_err(|_| Error::ReadKernelImage)?;

        let consumed = buf.len();
        kernel_image.consume(consumed);

        let offset = u64::try_from(consumed).map_err(|_| Error::InvalidKernelSize)?;
        current_addr = current_addr
            .checked_add(offset)
            .ok_or(Error::InvalidKernelSize)?;
    }

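    // The number of bytes copied is the distance from the load address to the current write
    // position; as in `load_arm64_kernel`, reserve at least `image_size` bytes for the kernel.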
    let file_size = current_addr.offset_from(load_addr);
    let range_size = max(file_size, u64::from(header.image_size));
    Ok(LoadedKernel {
        size: file_size,
        address_range: AddressRange::from_start_and_size(load_addr.offset(), range_size)
            .ok_or(Error::InvalidKernelSize)?,
        entry: load_addr,
    })
}

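/// Loads an LZ4-compressed arm64 kernel from `kernel_image`, decompressing it on the fly into
/// `guest_mem` at the offset requested by the image header.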
pub fn load_arm64_kernel_lz4<F: Read + Seek>(
    guest_mem: &GuestMemory,
    kernel_start: GuestAddress,
    mut kernel_image: F,
) -> Result<LoadedKernel> {
    kernel_image
        .seek(SeekFrom::Start(0))
        .map_err(|_| Error::SeekKernelStart)?;
    load_arm64_kernel_from_reader(
        guest_mem,
        kernel_start,
        &mut Lz4FrameDecoder::new(kernel_image),
    )
}

#[cfg(test)]
mod test {
    use std::fs::File;
    use std::io::Seek;
    use std::io::SeekFrom;
    use std::io::Write;

    use tempfile::tempfile;
    use vm_memory::GuestAddress;
    use vm_memory::GuestMemory;

    use crate::load_arm64_kernel;
    use crate::load_arm64_kernel_lz4;
    use crate::Error;

    const MEM_SIZE: u64 = 0x200_0000;

    fn create_guest_mem() -> GuestMemory {
        GuestMemory::new(&[(GuestAddress(0x0), MEM_SIZE)]).unwrap()
    }

    #[allow(clippy::unusual_byte_groupings)]
    fn write_valid_kernel() -> File {
        let mut f = tempfile().expect("failed to create tempfile");

        f.write_all(&[0x00, 0xC0, 0x2E, 0x14]).unwrap(); // code0
        f.write_all(&[0x00, 0x00, 0x00, 0x00]).unwrap(); // code1
        f.write_all(&0x00000000_00E70000u64.to_le_bytes()).unwrap(); // text_offset
        f.write_all(&0x00000000_0000000Au64.to_le_bytes()).unwrap(); // image_size
        f.write_all(&0x00000000_00000000u64.to_le_bytes()).unwrap(); // flags
        f.write_all(&0x00000000_00000000u64.to_le_bytes()).unwrap(); // res2
        f.write_all(&0x00000000_00000000u64.to_le_bytes()).unwrap(); // res3
        f.write_all(&0x00000000_00000000u64.to_le_bytes()).unwrap(); // res4
        f.write_all(&0x644D5241u32.to_le_bytes()).unwrap(); // magic
        f.write_all(&0x00000000u32.to_le_bytes()).unwrap(); // res5

        f.set_len(0xDC3808).unwrap();
        f
    }

    fn mutate_file(mut f: &File, offset: u64, val: &[u8]) {
        f.seek(SeekFrom::Start(offset))
            .expect("failed to seek file");
        f.write_all(val)
            .expect("failed to write mutated value to file");
    }

    #[test]
    fn load_arm64_valid() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel();
        let kernel = load_arm64_kernel(&gm, kernel_addr, &mut f).unwrap();
        assert_eq!(kernel.address_range.start, 0x107_0000);
        assert_eq!(kernel.address_range.end, 0x1E3_3807);
        assert_eq!(kernel.size, 0xDC_3808);
        assert_eq!(kernel.entry, GuestAddress(0x107_0000));
    }

    #[test]
    fn load_arm64_image_size_zero() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel();

        // Set image_size = 0 and validate the default text_offset is applied.
        mutate_file(&f, 16, &0u64.to_le_bytes());

        let kernel = load_arm64_kernel(&gm, kernel_addr, &mut f).unwrap();
        assert_eq!(kernel.address_range.start, 0x28_0000);
        assert_eq!(kernel.address_range.end, 0x104_3807);
        assert_eq!(kernel.size, 0xDC_3808);
        assert_eq!(kernel.entry, GuestAddress(0x28_0000));
    }

    #[test]
    fn load_arm64_bad_magic() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel();

        // Mutate magic number so it doesn't match
        mutate_file(&f, 56, &[0xCC, 0xCC, 0xCC, 0xCC]);

        assert_eq!(
            load_arm64_kernel(&gm, kernel_addr, &mut f),
            Err(Error::InvalidMagicNumber)
        );
    }

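    // Writes a minimal LZ4 frame-format stream that decompresses to a valid arm64 Image
    // header followed by filler bytes.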
    fn write_valid_kernel_lz4() -> File {
        let mut f = tempfile().expect("failed to create tempfile");

        f.write_all(&0x184d2204u32.to_le_bytes()).unwrap(); // magic
        f.write_all(&[0x44, 0x70, 0x1d]).unwrap(); // flg, bd, hc

        // Compressed block #1.
        f.write_all(&0x00004065u32.to_le_bytes()).unwrap();
        f.write_all(&[
            0x51, 0x00, 0xc0, 0x2e, 0x14, 0x00, 0x01, 0x00, 0x11, 0xe7, 0x06, 0x00, 0x11, 0x0a,
            0x06, 0x00, 0x0f, 0x02, 0x00, 0x0f, 0x4f, 0x41, 0x52, 0x4d, 0x64, 0x26, 0x00, 0x0f,
            0x0f, 0x02, 0x00,
        ])
        .unwrap();
        f.write_all(&[0xff; 16447]).unwrap();

        // Compressed block #2.
        f.write_all(&0x000050c9u32.to_le_bytes()).unwrap();
        f.write_all(&[
            0x00, 0x00, 0x00, 0x4b, 0x40, 0x00, 0x00, 0x1f, 0x00, 0x01, 0x00,
        ])
        .unwrap();
        f.write_all(&[0xff; 16448]).unwrap();

        // Compressed block #3.
        f.write_all(&0x00005027u32.to_le_bytes()).unwrap();
        f.write_all(&[
            0x00, 0x00, 0x00, 0x4b, 0x40, 0x00, 0x00, 0x1f, 0x00, 0x01, 0x00,
        ])
        .unwrap();
        f.write_all(&[0xff; 16448]).unwrap();

        // Compressed block #4.
        f.write_all(&0x00005027u32.to_le_bytes()).unwrap();
        f.write_all(&[
            0x00, 0x00, 0x00, 0x5f, 0x1c, 0x00, 0x00, 0x1f, 0x00, 0x01, 0x00,
        ])
        .unwrap();
        f.write_all(&[0xff; 7252]).unwrap();
        f.write_all(&[0x43, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00])
            .unwrap();

        // EndMark
        f.write_all(&0x00000000u32.to_le_bytes()).unwrap();

        // Checksum
        f.write_all(&0x22a9944cu32.to_le_bytes()).unwrap();

        f
    }

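    // Same as `write_valid_kernel_lz4`, but using the LZ4 legacy frame format
    // (magic 0x184c2102).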
    fn write_valid_kernel_lz4_legacy() -> File {
        let mut f = tempfile().expect("failed to create tempfile");

        f.write_all(&0x184c2102u32.to_le_bytes()).unwrap(); // magic

        // Compressed block #1.
        f.write_all(&0x000080a6u32.to_le_bytes()).unwrap();
        f.write_all(&[
            0x51, 0x00, 0xc0, 0x2e, 0x14, 0x00, 0x01, 0x00, 0x11, 0xe7, 0x06, 0x00, 0x11, 0x0a,
            0x06, 0x00, 0x0f, 0x02, 0x00, 0x0f, 0x4f, 0x41, 0x52, 0x4d, 0x64, 0x26, 0x00, 0x0f,
            0x0f, 0x02, 0x00,
        ])
        .unwrap();
        f.write_all(&[0xff; 32896]).unwrap();

        // Compressed block #2.
        f.write_all(&0x0000500au32.to_le_bytes()).unwrap();
        f.write_all(&[
            0x00, 0x00, 0x00, 0x9f, 0x5c, 0x00, 0x00, 0x1f, 0x00, 0x01, 0x00,
        ])
        .unwrap();
        f.write_all(&[0xff; 23700]).unwrap();
        f.write_all(&[0x83, 0x50, 0x00]).unwrap();

        // EndMark
        f.write_all(&[0x00, 0x00, 0x00, 0x00]).unwrap();

        f
    }

    #[test]
    fn load_arm64_lz4_valid() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel_lz4();
        let kernel = load_arm64_kernel_lz4(&gm, kernel_addr, &mut f).unwrap();
        assert_eq!(kernel.address_range.start, 0x107_0000);
        assert_eq!(kernel.address_range.end, 0x1E3_3807);
        assert_eq!(kernel.size, 0xDC_3808);
        assert_eq!(kernel.entry, GuestAddress(0x107_0000));
    }

    #[test]
    fn load_arm64_lz4_bad_magic() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel_lz4();

        mutate_file(&f, 0, &[0xCC, 0xCC, 0xCC, 0xCC]);

        assert_eq!(
            load_arm64_kernel_lz4(&gm, kernel_addr, &mut f),
            Err(Error::ReadHeader)
        );
    }

    #[test]
    fn load_arm64_lz4_bad_block() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel_lz4();

        mutate_file(&f, 7, &[0xCC, 0xCC, 0xCC, 0xCC]);

        assert_eq!(
            load_arm64_kernel_lz4(&gm, kernel_addr, &mut f),
            Err(Error::ReadHeader)
        );
    }

    #[test]
    fn load_arm64_lz4_legacy_valid() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel_lz4_legacy();
        let kernel = load_arm64_kernel_lz4(&gm, kernel_addr, &mut f).unwrap();
        assert_eq!(kernel.address_range.start, 0x107_0000);
        assert_eq!(kernel.address_range.end, 0x1E3_3807);
        assert_eq!(kernel.size, 0xDC_3808);
        assert_eq!(kernel.entry, GuestAddress(0x107_0000));
    }

    #[test]
    fn load_arm64_lz4_legacy_bad_magic() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel_lz4_legacy();

        mutate_file(&f, 0, &[0xCC, 0xCC, 0xCC, 0xCC]);

        assert_eq!(
            load_arm64_kernel_lz4(&gm, kernel_addr, &mut f),
            Err(Error::ReadHeader)
        );
    }

    #[test]
    fn load_arm64_lz4_legacy_bad_block() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel_lz4_legacy();

        mutate_file(&f, 4, &[0xCC, 0xCC, 0xCC, 0xCC]);

        assert_eq!(
            load_arm64_kernel_lz4(&gm, kernel_addr, &mut f),
            Err(Error::ReadHeader)
        );
    }
}