use crate::{defrag::*, *};
use std::vec::Vec;

/// Buffer to reconstruct a single fragmented IP packet.
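///
/// A minimal usage sketch (the offsets, `more_fragments` flags and payload
/// slices would normally come from parsed IPv4/IPv6 fragment headers; `frag_0`
/// and `frag_1` are placeholder payloads and error handling is reduced to
/// `unwrap`):
///
/// ```ignore
/// let frag_0: &[u8] = &[0u8; 16];
/// let frag_1: &[u8] = &[0u8; 8];
///
/// let mut buf = IpDefragBuf::new(IpNumber::UDP, Vec::new(), Vec::new());
///
/// // offsets are given in multiples of 8 bytes
/// buf.add(IpFragOffset::try_new(0).unwrap(), true, frag_0).unwrap();
/// buf.add(IpFragOffset::try_new(2).unwrap(), false, frag_1).unwrap();
///
/// // once all sections are present the payload can be taken out
/// if buf.is_complete() {
///     let (payload, sections) = buf.take_bufs();
///     // ... use the payload, then hand the buffers back via `new` to re-use them ...
/// }
/// ```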
#[derive(Debug, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)]
pub struct IpDefragBuf {
    /// IP number identifying the type of payload.
    ip_number: IpNumber,

    /// Data buffer that should contain the reconstructed payload in the end.
    data: Vec<u8>,

    /// Contains the ranges filled with data.
    sections: Vec<IpFragRange>,

    /// End length of the defragmented packet (set if a packet with the
    /// "more fragments" flag unset was added).
    end: Option<u16>,
}

impl IpDefragBuf {
    pub fn new(
        ip_number: IpNumber,
        mut data: Vec<u8>,
        mut sections: Vec<IpFragRange>,
    ) -> IpDefragBuf {
        IpDefragBuf {
            ip_number,
            data: {
                data.clear();
                data
            },
            sections: {
                sections.clear();
                sections
            },
            end: None,
        }
    }

    /// Return the IP number of the payload data that gets restored.
    #[inline]
    pub fn ip_number(&self) -> IpNumber {
        self.ip_number
    }

    /// Data buffer in which the packet payload is reconstructed.
    #[inline]
    pub fn data(&self) -> &Vec<u8> {
        &self.data
    }

    /// Sections of the packet that have already been filled with data.
    #[inline]
    pub fn sections(&self) -> &Vec<IpFragRange> {
        &self.sections
    }

    /// End length of the defragmented packet (if already known).
    #[inline]
    pub fn end(&self) -> Option<u16> {
        self.end
    }

    /// Add an IP fragment payload.
    #[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))]
    pub fn add(
        &mut self,
        offset: IpFragOffset,
        more_fragments: bool,
        payload: &[u8],
    ) -> Result<(), IpDefragError> {
        use IpDefragError::*;

        // validate lengths
        let Ok(len_u16) = u16::try_from(payload.len()) else {
            return Err(SegmentTooBig {
                offset,
                payload_len: payload.len(),
                max: MAX_IP_DEFRAG_LEN_U16,
            });
        };

        let Some(end) = offset.byte_offset().checked_add(len_u16) else {
            return Err(SegmentTooBig {
                offset,
                payload_len: payload.len(),
                max: MAX_IP_DEFRAG_LEN_U16,
            });
        };

        // validate that the payload len is a multiple of 8 in case it is not the end
        if more_fragments && 0 != payload.len() & 0b111 {
            return Err(UnalignedFragmentPayloadLen {
                offset,
                payload_len: payload.len(),
            });
        }

        // check the section is not already ended
        if let Some(previous_end) = self.end {
            // error if the new data goes past the known end or introduces a conflicting end
            if previous_end < end || ((false == more_fragments) && end != previous_end) {
                return Err(ConflictingEnd {
                    previous_end,
                    conflicting_end: end,
                });
            }
        }

        // get enough memory to store the de-fragmented payload
        let required_len = usize::from(end);
        if self.data.len() < required_len {
            if self.data.capacity() < required_len
                && self
                    .data
                    .try_reserve(required_len - self.data.len())
                    .is_err()
            {
                return Err(AllocationFailure { len: required_len });
            }
            unsafe {
                self.data.set_len(required_len);
            }
        }

        // insert new data
        let data_offset = usize::from(offset.byte_offset());
        self.data[data_offset..data_offset + payload.len()].copy_from_slice(payload);

        // update sections
        let mut new_section = IpFragRange {
            start: offset.byte_offset(),
            end,
        };

        // merge overlapping sections into the new section and remove them
        self.sections.retain(|it| -> bool {
            if let Some(merged) = new_section.merge(*it) {
                new_section = merged;
                false
            } else {
                true
            }
        });
        self.sections.push(new_section);

        // set end
        if false == more_fragments {
            self.end = Some(end);
            // restrict the data length to the now known end
            unsafe {
                // SAFETY: Safe as the length has previously been checked to be at least "end" long
                self.data.set_len(usize::from(end));
            }
        }

        Ok(())
    }

    /// Returns true if the fragmented data is complete.
    pub fn is_complete(&self) -> bool {
        self.end.is_some() && 1 == self.sections.len() && 0 == self.sections[0].start
    }

    /// Consume the [`IpDefragBuf`] and return the buffers.
    #[inline]
    pub fn take_bufs(self) -> (Vec<u8>, Vec<IpFragRange>) {
        (self.data, self.sections)
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use std::{format, vec};

    #[test]
    fn debug_clone_eq() {
        let buf = IpDefragBuf::new(IpNumber::UDP, Vec::new(), Vec::new());
        let _ = format!("{:?}", buf);
        assert_eq!(buf, buf.clone());
        assert_eq!(buf.cmp(&buf), core::cmp::Ordering::Equal);
        assert_eq!(buf.partial_cmp(&buf), Some(core::cmp::Ordering::Equal));

        use core::hash::{Hash, Hasher};
        use std::collections::hash_map::DefaultHasher;
        let h1 = {
            let mut h = DefaultHasher::new();
            buf.hash(&mut h);
            h.finish()
        };
        let h2 = {
            let mut h = DefaultHasher::new();
            buf.clone().hash(&mut h);
            h.finish()
        };
        assert_eq!(h1, h2);
    }

    #[test]
    fn new() {
        let actual = IpDefragBuf::new(
            IpNumber::UDP,
            vec![1],
            vec![IpFragRange { start: 0, end: 1 }],
        );
        assert_eq!(actual.ip_number(), IpNumber::UDP);
        assert!(actual.data().is_empty());
        assert!(actual.sections().is_empty());
        assert!(actual.end().is_none());
    }
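
    /// Sketch of the buffer re-use pattern implied by `new` and `take_bufs`:
    /// `take_bufs` hands the allocations back and `new` clears them, so they
    /// can be recycled for the next packet (the fragment values below are
    /// arbitrary and only serve as an illustration).
    #[test]
    fn buffer_reuse_sketch() {
        let mut buffer = IpDefragBuf::new(IpNumber::UDP, Vec::new(), Vec::new());
        buffer
            .add(IpFragOffset::try_new(0).unwrap(), false, &[1u8; 8])
            .unwrap();
        assert!(buffer.is_complete());

        // take the buffers out and feed them back into a fresh defrag buffer
        let (data, sections) = buffer.take_bufs();
        let reused = IpDefragBuf::new(IpNumber::UDP, data, sections);
        assert!(reused.data().is_empty());
        assert!(reused.sections().is_empty());
        assert!(reused.end().is_none());
    }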

    /// Returns a u8 vec counting up from "start" until len is reached (values truncated to the lower 8 bits).
    fn sequence(start: usize, len: usize) -> Vec<u8> {
        let mut result = Vec::with_capacity(len);
        for i in start..start + len {
            result.push((i & 0xff) as u8);
        }
        result
    }

    #[rustfmt::skip]
    #[test]
    fn add() {
        use IpDefragError::*;

        // normal reconstruction
        {
            let mut buffer = IpDefragBuf::new(IpNumber::UDP, Vec::new(), Vec::new());

            let actions = [
                (false, (0, true, &sequence(0,16))),
                (false, (16, true, &sequence(16,32))),
                (true, (48, false, &sequence(48,16))),
            ];
            for a in actions {
                assert!(0 == (a.1.0 % 8));
                buffer.add(
                    IpFragOffset::try_new(a.1.0 / 8).unwrap(),
                    a.1.1,
                    a.1.2
                ).unwrap();
                assert_eq!(a.0, buffer.is_complete());
            }
            let (payload, _) = buffer.take_bufs();
            assert_eq!(&payload, &sequence(0,16*4));
        }

        // overlapping reconstruction
        {
            let mut buffer = IpDefragBuf::new(IpNumber::UDP, Vec::new(), Vec::new());

            let actions = [
                (false, (0, true, sequence(0,16))),
                // will be overwritten
                (false, (32, true, sequence(0,16))),
                // overwrites
                (false, (32, false, sequence(32,16))),
                // completes
                (true, (16, true, sequence(16,16))),
            ];
            for a in actions {
                assert!(0 == (a.1.0 % 8));
                buffer.add(
                    IpFragOffset::try_new(a.1.0 / 8).unwrap(),
                    a.1.1,
                    &a.1.2
                ).unwrap();
                assert_eq!(a.0, buffer.is_complete());
            }
            let (payload, _) = buffer.take_bufs();
            assert_eq!(&payload, &sequence(0,16*3));
        }

        // reverse order
        {
            let mut buffer = IpDefragBuf::new(IpNumber::UDP, Vec::new(), Vec::new());

            let actions = [
                (false, (48, false, &sequence(48,16))),
                (false, (16, true, &sequence(16,32))),
                (true, (0, true, &sequence(0,16))),
            ];
            for a in actions {
                assert!(0 == (a.1.0 % 8));
                buffer.add(
                    IpFragOffset::try_new(a.1.0 / 8).unwrap(),
                    a.1.1,
                    &a.1.2
                ).unwrap();
                assert_eq!(a.0, buffer.is_complete());
            }
            let (payload, _) = buffer.take_bufs();
            assert_eq!(&payload, &sequence(0,16*4));
        }

        // error if the packet is bigger than the max (payload len only)
        {
            let mut buffer = IpDefragBuf::new(IpNumber::UDP, Vec::new(), Vec::new());
            let payload_len = usize::from(u16::MAX) + 1;
            assert_eq!(
                SegmentTooBig { offset: IpFragOffset::try_new(0).unwrap(), payload_len, max: u16::MAX },
                buffer.add(
                    IpFragOffset::try_new(0).unwrap(),
                    true,
                    &sequence(0, payload_len)
                ).unwrap_err()
            );
        }

        // error if the packet is bigger than the max (offset + payload len)
        {
            let mut buffer = IpDefragBuf::new(IpNumber::UDP, Vec::new(), Vec::new());
            let payload_len = usize::from(u16::MAX) - 32 - 16 + 1;
            assert_eq!(
                SegmentTooBig { offset: IpFragOffset::try_new((32 + 16)/8).unwrap(), payload_len, max: u16::MAX },
                buffer.add(
                    IpFragOffset::try_new((32 + 16)/8).unwrap(),
                    true,
                    &sequence(0,payload_len)
                ).unwrap_err()
            );
        }

        // check packets that fill exactly to the max work
        {
            let mut buffer = IpDefragBuf::new(IpNumber::UDP, Vec::new(), Vec::new());

            let payload_len = usize::from(u16::MAX - 16);
            assert_eq!(
                Ok(()),
                buffer.add(
                    IpFragOffset::try_new(16/8).unwrap(),
                    false,
                    &sequence(0, payload_len)
                )
            );
        }

        // error if the payload len is not a multiple of 8 while more fragments follow
        for bad_len in 1..8 {
            let mut buffer = IpDefragBuf::new(IpNumber::UDP, Vec::new(), Vec::new());
            assert_eq!(
                UnalignedFragmentPayloadLen {
                    offset: IpFragOffset::try_new(48/8).unwrap(),
                    payload_len: bad_len
                },
                buffer.add(
                    IpFragOffset::try_new(48/8).unwrap(),
                    true,
                    &sequence(0, bad_len)
                ).unwrap_err()
            );
        }

        // test that conflicting ends trigger errors (received a different end)
        {
            let mut buffer = IpDefragBuf::new(IpNumber::UDP, Vec::new(), Vec::new());

            // setup an end (aka no more fragments)
            buffer.add(
                IpFragOffset::try_new(32/8).unwrap(),
                false,
                &sequence(32,16)
            ).unwrap();

            // test that a "non end" fragment extending past the known end triggers an error
            assert_eq!(
                ConflictingEnd { previous_end: 32 + 16, conflicting_end: 48 + 16 },
                buffer.add(
                    IpFragOffset::try_new(48/8).unwrap(),
                    true,
                    &sequence(48,16)
                ).unwrap_err()
            );

            // test that a new end at an earlier position triggers an error
            assert_eq!(
                ConflictingEnd { previous_end: 32 + 16, conflicting_end: 16 + 16 },
                buffer.add(
                    IpFragOffset::try_new(16/8).unwrap(),
                    false,
                    &sequence(16,16)
                ).unwrap_err()
            );
        }
    }
}