1 use super::*;
2
3 use crate::test_gens::*;
4 use alloc::{vec, vec::Vec};
5 use proptest::prelude::*;
6
/// Description of a single packet composition (headers + payload) that is
/// serialized and then decoded again by the round-trip tests below.
#[derive(Clone, Debug, Eq, PartialEq)]
struct ComponentTest {
    // link layer header (Ethernet II or Linux SLL), if any
    link: Option<LinkHeader>,
    // single or double VLAN tagging, if any
    vlan: Option<VlanHeader>,
    // network layer headers (IPv4 + exts, IPv6 + exts or ARP), if any
    net: Option<NetHeaders>,
    // transport layer header (UDP, TCP, ICMPv4 or ICMPv6), if any
    transport: Option<TransportHeader>,
    // payload bytes following all headers
    payload: Vec<u8>,
}
15
16 static VLAN_ETHER_TYPES: &'static [EtherType] = &[
17 EtherType::VLAN_TAGGED_FRAME,
18 EtherType::PROVIDER_BRIDGING,
19 EtherType::VLAN_DOUBLE_TAGGED_FRAME,
20 ];
21
22 impl ComponentTest {
serialize(&self) -> Vec<u8>23 fn serialize(&self) -> Vec<u8> {
24 let mut buffer = Vec::<u8>::with_capacity(
25 match &self.link {
26 Some(header) => header.header_len(),
27 None => 0,
28 } + match &self.vlan {
29 Some(header) => header.header_len(),
30 None => 0,
31 } + match &self.net {
32 Some(headers) => headers.header_len(),
33 None => 0,
34 } + match &self.transport {
35 Some(header) => header.header_len(),
36 None => 0,
37 } + self.payload.len(),
38 );
39
40 //fill all the elements
41 match &self.link {
42 Some(header) => header.write(&mut buffer).unwrap(),
43 None => {}
44 }
45 use crate::VlanHeader::*;
46 match &self.vlan {
47 Some(Single(header)) => header.write(&mut buffer).unwrap(),
48 Some(Double(header)) => header.write(&mut buffer).unwrap(),
49 None => {}
50 }
51 match &self.net {
52 Some(NetHeaders::Ipv4(header, exts)) => {
53 header.write_raw(&mut buffer).unwrap();
54 exts.write(&mut buffer, header.protocol).unwrap();
55 }
56 Some(NetHeaders::Ipv6(header, exts)) => {
57 header.write(&mut buffer).unwrap();
58 exts.write(&mut buffer, header.next_header).unwrap();
59 }
60 Some(NetHeaders::Arp(arp)) => {
61 arp.write(&mut buffer).unwrap();
62 }
63 None => {}
64 }
65 match &self.transport {
66 Some(TransportHeader::Icmpv6(header)) => header.write(&mut buffer).unwrap(),
67 Some(TransportHeader::Icmpv4(header)) => header.write(&mut buffer).unwrap(),
68 Some(TransportHeader::Udp(header)) => header.write(&mut buffer).unwrap(),
69 Some(TransportHeader::Tcp(header)) => header.write(&mut buffer).unwrap(),
70 None => {}
71 }
72 use std::io::Write;
73 buffer.write(&self.payload[..]).unwrap();
74 buffer
75 }
76
77 /// Serialize the headers & payload specified in the headers and check that
78 /// the different decoding & slicing methods for entire packets work correctly.
79 ///
80 /// The following functions will be checked if they work correctly:
81 /// * `SlicedPacket::from_ethernet`
82 /// * `SlicedPacket::from_ip`
83 /// * `PacketHeaders::from_ethernet_slice`
84 /// * `PacketHeaders::from_ip_slice`
    fn run(&self) {
        // clone the test so the length fields can be adapted
        let mut test = self.clone();

        // set the payload length in the ip header so the serialized bytes
        // are consistent (payload = ip extensions + transport + payload)
        if let Some(net) = test.net.as_mut() {
            match net {
                NetHeaders::Ipv4(ipv4, exts) => {
                    ipv4.set_payload_len(
                        exts.header_len()
                            + self.transport.as_ref().map_or(0, |t| t.header_len())
                            + self.payload.len(),
                    )
                    .unwrap();
                }
                NetHeaders::Ipv6(ipv6, exts) => {
                    ipv6.set_payload_length(
                        exts.header_len()
                            + self.transport.as_ref().map_or(0, |t| t.header_len())
                            + self.payload.len(),
                    )
                    .unwrap();
                }
                // ARP has no length field to adapt
                NetHeaders::Arp(_) => {}
            }
        }
        // the udp length field also covers the udp header itself
        if let Some(TransportHeader::Udp(udp)) = test.transport.as_mut() {
            udp.length = udp.header_len_u16() + self.payload.len() as u16;
        }

        //packet with ethernet2 & vlan headers
        {
            //serialize to buffer
            let buffer = test.serialize();

            // PacketHeaders::from_ethernet_slice
            test.assert_headers(PacketHeaders::from_ethernet_slice(&buffer).unwrap());

            // SlicedPacket::from_ethernet
            test.assert_sliced_packet(SlicedPacket::from_ethernet(&buffer).unwrap());

            // create unexpected end of slice errors for the different headers
            for len in test.invalid_ser_lengths() {
                if let Some(len) = len {
                    assert!(PacketHeaders::from_ethernet_slice(&buffer[..len]).is_err());
                    assert!(SlicedPacket::from_ethernet(&buffer[..len]).is_err());
                }
            }
        }

        // packet data starting right after the link layer (tests from_ether_type functions)
        {
            // remove the link layer
            let ether_down = {
                let mut ether_down = test.clone();
                ether_down.link = None;
                ether_down
            };

            // serialize to buffer
            let buffer = ether_down.serialize();

            // PacketHeaders::from_ether_type
            ether_down.assert_headers(
                PacketHeaders::from_ether_type(
                    test.link.clone().unwrap().ethernet2().unwrap().ether_type,
                    &buffer[..],
                )
                .unwrap(),
            );

            // SlicedPacket::from_ether_type
            ether_down.assert_sliced_packet(
                SlicedPacket::from_ether_type(
                    test.link.clone().unwrap().ethernet2().unwrap().ether_type,
                    &buffer[..],
                )
                .unwrap(),
            );

            // create unexpected end of slice errors for the different headers
            for len in ether_down.invalid_ser_lengths() {
                if let Some(len) = len {
                    assert!(PacketHeaders::from_ether_type(
                        test.link.clone().unwrap().ethernet2().unwrap().ether_type,
                        &buffer[..len]
                    )
                    .is_err());
                    assert!(SlicedPacket::from_ether_type(
                        test.link.clone().unwrap().ethernet2().unwrap().ether_type,
                        &buffer[..len]
                    )
                    .is_err());
                }
            }
        }

        // packet from the internet layer down (without ethernet2 & vlan headers)
        // NOTE: only executed for IPv4/IPv6 (from_ip_* can not decode ARP)
        if test.net.as_ref().map(|v| v.is_ip()).unwrap_or(false) {
            // serialize from the ip layer downwards
            let ip_down = {
                let mut ip_down = test.clone();
                ip_down.link = None;
                ip_down.vlan = None;
                ip_down
            };

            // serialize to buffer
            let buffer = ip_down.serialize();

            // PacketHeaders::from_ip_slice
            ip_down.assert_headers(PacketHeaders::from_ip_slice(&buffer).unwrap());

            // SlicedPacket::from_ip
            ip_down.assert_sliced_packet(SlicedPacket::from_ip(&buffer).unwrap());

            // create unexpected end of slice errors for the different headers
            for len in ip_down.invalid_ser_lengths() {
                if let Some(len) = len {
                    assert!(PacketHeaders::from_ip_slice(&buffer[..len]).is_err());
                    assert!(SlicedPacket::from_ip(&buffer[..len]).is_err());
                }
            }
        }
    }
210
211 /// Creates slice lengths at which an too short slice error
212 /// should be triggered.
invalid_ser_lengths(&self) -> [Option<usize>; 12]213 fn invalid_ser_lengths(&self) -> [Option<usize>; 12] {
214 struct Builder {
215 result: [Option<usize>; 12],
216 next_index: usize,
217 offset: usize,
218 }
219
220 impl Builder {
add(&mut self, header_len: usize)221 fn add(&mut self, header_len: usize) {
222 self.offset += header_len;
223 self.result[self.next_index] = Some(self.offset - 1);
224 self.next_index += 1;
225 }
226 }
227
228 let mut builder = Builder {
229 result: [None; 12],
230 next_index: 0,
231 offset: 0,
232 };
233
234 if let Some(link) = self.link.as_ref() {
235 builder.add(link.header_len());
236 }
237 if let Some(vlan) = self.vlan.as_ref() {
238 use VlanHeader::*;
239 match vlan {
240 Single(single) => builder.add(single.header_len()),
241 Double(double) => {
242 builder.add(double.outer.header_len());
243 builder.add(double.inner.header_len());
244 }
245 }
246 }
247 if let Some(net) = self.net.as_ref() {
248 use NetHeaders::*;
249 match net {
250 Ipv4(header, exts) => {
251 builder.add(header.header_len());
252 if let Some(auth) = exts.auth.as_ref() {
253 builder.add(auth.header_len());
254 }
255 }
256 Ipv6(header, exts) => {
257 builder.add(header.header_len());
258 if let Some(e) = exts.hop_by_hop_options.as_ref() {
259 builder.add(e.header_len());
260 }
261 if let Some(e) = exts.destination_options.as_ref() {
262 builder.add(e.header_len());
263 }
264 if let Some(routing) = exts.routing.as_ref() {
265 builder.add(routing.routing.header_len());
266 if let Some(e) = routing.final_destination_options.as_ref() {
267 builder.add(e.header_len());
268 }
269 }
270 if let Some(e) = exts.fragment.as_ref() {
271 builder.add(e.header_len());
272 }
273 if let Some(e) = exts.auth.as_ref() {
274 builder.add(e.header_len());
275 }
276 }
277 Arp(arp) => {
278 builder.add(arp.packet_len());
279 }
280 }
281 }
282 if let Some(transport) = self.transport.as_ref() {
283 builder.add(transport.header_len());
284 }
285
286 builder.result
287 }
288
assert_headers(&self, actual: PacketHeaders)289 fn assert_headers(&self, actual: PacketHeaders) {
290 assert_eq!(self.link, actual.link);
291 assert_eq!(self.vlan, actual.vlan);
292 assert_eq!(self.net, self.net);
293 assert_eq!(self.transport, actual.transport);
294 assert_eq!(self.payload[..], actual.payload.slice()[..]);
295 }
296
    /// Checks that the given `SlicedPacket` contains the same headers and
    /// payload as this test expects.
    fn assert_sliced_packet(&self, result: SlicedPacket) {
        //assert identity to touch the derives (code coverage hack)
        assert_eq!(result, result);

        //ethernet & vlan
        // (payload-only link slices have no header and map to None)
        assert_eq!(
            self.link,
            match result.link.as_ref() {
                Some(l) => match l {
                    LinkSlice::Ethernet2(e) => Some(LinkHeader::Ethernet2(e.to_header())),
                    LinkSlice::LinuxSll(e) => Some(LinkHeader::LinuxSll(e.to_header())),
                    LinkSlice::EtherPayload(_) => None,
                    LinkSlice::LinuxSllPayload(_) => None,
                },
                None => None,
            }
        );
        assert_eq!(self.vlan, result.vlan.as_ref().map(|ref x| x.to_header()));

        //ip
        // (slices are converted back into owned headers for the comparison)
        assert_eq!(self.net, {
            use crate::NetSlice::*;
            match result.net.as_ref() {
                Some(Ipv4(actual)) => Some(NetHeaders::Ipv4(
                    actual.header().to_header(),
                    Ipv4Extensions {
                        auth: actual.extensions().auth.map(|ref x| x.to_header()),
                    },
                )),
                Some(Ipv6(actual)) => Some(NetHeaders::Ipv6(
                    actual.header().to_header(),
                    // re-decode the extension headers from the extension slice
                    Ipv6Extensions::from_slice(
                        actual.header().next_header(),
                        actual.extensions().slice(),
                    )
                    .unwrap()
                    .0,
                )),
                Some(Arp(arp)) => Some(NetHeaders::Arp(arp.to_packet())),
                None => None,
            }
        });

        // transport header
        assert_eq!(
            self.transport,
            match result.transport.as_ref() {
                Some(TransportSlice::Icmpv4(actual)) =>
                    Some(TransportHeader::Icmpv4(actual.header())),
                Some(TransportSlice::Icmpv6(actual)) =>
                    Some(TransportHeader::Icmpv6(actual.header())),
                Some(TransportSlice::Udp(actual)) => Some(TransportHeader::Udp(actual.to_header())),
                Some(TransportSlice::Tcp(actual)) => Some(TransportHeader::Tcp(actual.to_header())),
                None => None,
            }
        );
        // additional check for the contents of Unknown
        if self.transport.is_none() {
            match result.transport.as_ref() {
                None => assert!(result.transport.is_none()),
                _ => unreachable!(),
            }
        }

        //payload
        // (checked against the innermost decoded layer that carries one)
        match result.transport.as_ref() {
            Some(TransportSlice::Icmpv4(icmpv4)) => {
                assert_eq!(&self.payload[..], icmpv4.payload());
            }
            Some(TransportSlice::Icmpv6(icmpv6)) => {
                assert_eq!(&self.payload[..], icmpv6.payload());
            }
            Some(TransportSlice::Udp(udp)) => {
                assert_eq!(&self.payload[..], udp.payload());
            }
            Some(TransportSlice::Tcp(tcp)) => {
                assert_eq!(&self.payload[..], tcp.payload());
            }
            // check ip next
            None => {
                if let Some(ip) = result.net.as_ref() {
                    assert_eq!(
                        &self.payload[..],
                        match ip {
                            NetSlice::Ipv4(s) => s.payload.payload,
                            NetSlice::Ipv6(s) => s.payload.payload,
                            // ARP packets carry no payload
                            NetSlice::Arp(_) => &[],
                        }
                    );
                } else {
                    if let Some(vlan) = result.vlan.as_ref() {
                        assert_eq!(&self.payload[..], vlan.payload().payload);
                    } else {
                        if let Some(LinkSlice::Ethernet2(eth)) = result.link.as_ref() {
                            assert_eq!(&self.payload[..], eth.payload().payload);
                        }
                    }
                }
            }
        }
    }
398
run_vlan( &self, outer_vlan: &SingleVlanHeader, inner_vlan: &SingleVlanHeader, arp: &ArpPacket, ipv4: &Ipv4Header, ipv4_ext: &Ipv4Extensions, ipv6: &Ipv6Header, ipv6_ext: &Ipv6Extensions, udp: &UdpHeader, tcp: &TcpHeader, icmpv4: &Icmpv4Header, icmpv6: &Icmpv6Header, )399 fn run_vlan(
400 &self,
401 outer_vlan: &SingleVlanHeader,
402 inner_vlan: &SingleVlanHeader,
403 arp: &ArpPacket,
404 ipv4: &Ipv4Header,
405 ipv4_ext: &Ipv4Extensions,
406 ipv6: &Ipv6Header,
407 ipv6_ext: &Ipv6Extensions,
408 udp: &UdpHeader,
409 tcp: &TcpHeader,
410 icmpv4: &Icmpv4Header,
411 icmpv6: &Icmpv6Header,
412 ) {
413 let setup_single = |ether_type: EtherType| -> ComponentTest {
414 let mut result = self.clone();
415 result.vlan = Some(VlanHeader::Single({
416 let mut v = inner_vlan.clone();
417 v.ether_type = ether_type;
418 v
419 }));
420 result
421 };
422 let setup_double =
423 |outer_ether_type: EtherType, inner_ether_type: EtherType| -> ComponentTest {
424 let mut result = self.clone();
425 result.vlan = Some(VlanHeader::Double(DoubleVlanHeader {
426 outer: {
427 let mut v = outer_vlan.clone();
428 v.ether_type = outer_ether_type;
429 v
430 },
431 inner: {
432 let mut v = inner_vlan.clone();
433 v.ether_type = inner_ether_type;
434 v
435 },
436 }));
437 result
438 };
439
440 //single
441 setup_single(inner_vlan.ether_type).run();
442 setup_single(ether_type::ARP).run_arp(arp);
443 setup_single(ether_type::IPV4).run_ipv4(ipv4, ipv4_ext, udp, tcp, icmpv4, icmpv6);
444 setup_single(ether_type::IPV6).run_ipv6(ipv6, ipv6_ext, udp, tcp, icmpv4, icmpv6);
445
446 //double
447 for ether_type in VLAN_ETHER_TYPES {
448 setup_double(*ether_type, inner_vlan.ether_type).run();
449 setup_double(*ether_type, ether_type::ARP).run_arp(arp);
450 setup_double(*ether_type, ether_type::IPV4)
451 .run_ipv4(ipv4, ipv4_ext, udp, tcp, icmpv4, icmpv6);
452 setup_double(*ether_type, ether_type::IPV6)
453 .run_ipv6(ipv6, ipv6_ext, udp, tcp, icmpv4, icmpv6);
454 }
455 }
456
run_arp(&self, arp: &ArpPacket)457 fn run_arp(&self, arp: &ArpPacket) {
458 let mut test = self.clone();
459 test.net = Some(NetHeaders::Arp(arp.clone()));
460 test.payload.clear();
461 test.run();
462 }
463
run_ipv4( &self, ip: &Ipv4Header, ip_exts: &Ipv4Extensions, udp: &UdpHeader, tcp: &TcpHeader, icmpv4: &Icmpv4Header, icmpv6: &Icmpv6Header, )464 fn run_ipv4(
465 &self,
466 ip: &Ipv4Header,
467 ip_exts: &Ipv4Extensions,
468 udp: &UdpHeader,
469 tcp: &TcpHeader,
470 icmpv4: &Icmpv4Header,
471 icmpv6: &Icmpv6Header,
472 ) {
473 // fragmenting
474 {
475 let mut test = self.clone();
476 test.net = Some({
477 let mut frag = ip.clone();
478 if false == frag.is_fragmenting_payload() {
479 frag.more_fragments = true;
480 }
481 let mut ip_exts = ip_exts.clone();
482 frag.protocol = ip_exts.set_next_headers(ip.protocol);
483 NetHeaders::Ipv4(frag, ip_exts.clone())
484 });
485
486 // run without transport header
487 test.run();
488 }
489
490 // non fragmenting
491 {
492 let mut test = self.clone();
493 test.net = Some({
494 let mut non_frag = ip.clone();
495 non_frag.more_fragments = false;
496 non_frag.fragment_offset = 0.try_into().unwrap();
497 let mut ip_exts = ip_exts.clone();
498 non_frag.protocol = ip_exts.set_next_headers(ip.protocol);
499 NetHeaders::Ipv4(non_frag, ip_exts)
500 });
501 test.run_transport(udp, tcp, icmpv4, icmpv6);
502 }
503 }
504
run_ipv6( &self, ip: &Ipv6Header, ip_exts: &Ipv6Extensions, udp: &UdpHeader, tcp: &TcpHeader, icmpv4: &Icmpv4Header, icmpv6: &Icmpv6Header, )505 fn run_ipv6(
506 &self,
507 ip: &Ipv6Header,
508 ip_exts: &Ipv6Extensions,
509 udp: &UdpHeader,
510 tcp: &TcpHeader,
511 icmpv4: &Icmpv4Header,
512 icmpv6: &Icmpv6Header,
513 ) {
514 // fragmenting
515 {
516 let mut test = self.clone();
517 test.net = Some({
518 let mut frag = ip_exts.clone();
519 if let Some(frag) = frag.fragment.as_mut() {
520 if false == frag.is_fragmenting_payload() {
521 frag.more_fragments = true;
522 }
523 } else {
524 frag.fragment = Some(Ipv6FragmentHeader::new(
525 ip_number::UDP,
526 IpFragOffset::ZERO,
527 true,
528 0,
529 ));
530 }
531 let mut ip = ip.clone();
532 ip.next_header = frag.set_next_headers(ip.next_header);
533 NetHeaders::Ipv6(ip, frag)
534 });
535 test.run();
536 }
537
538 // non fragmenting
539 {
540 let mut test = self.clone();
541 test.net = Some({
542 let mut non_frag = ip_exts.clone();
543 non_frag.fragment = None;
544 let mut ip = ip.clone();
545 ip.next_header = non_frag.set_next_headers(ip.next_header);
546 NetHeaders::Ipv6(ip, non_frag)
547 });
548 test.run_transport(udp, tcp, icmpv4, icmpv6);
549 }
550 }
551
run_transport( &self, udp: &UdpHeader, tcp: &TcpHeader, icmpv4: &Icmpv4Header, icmpv6: &Icmpv6Header, )552 fn run_transport(
553 &self,
554 udp: &UdpHeader,
555 tcp: &TcpHeader,
556 icmpv4: &Icmpv4Header,
557 icmpv6: &Icmpv6Header,
558 ) {
559 // unknown transport layer
560 self.run();
561
562 // udp
563 {
564 let mut test = self.clone();
565 test.net
566 .as_mut()
567 .unwrap()
568 .try_set_next_headers(ip_number::UDP)
569 .unwrap();
570 test.transport = Some(TransportHeader::Udp(udp.clone()));
571 test.run()
572 }
573
574 // tcp
575 {
576 let mut test = self.clone();
577 test.net
578 .as_mut()
579 .unwrap()
580 .try_set_next_headers(ip_number::TCP)
581 .unwrap();
582 test.transport = Some(TransportHeader::Tcp(tcp.clone()));
583 test.run()
584 }
585
586 // icmpv4
587 if let Some(payload_size) = icmpv4.fixed_payload_size() {
588 let mut test = self.clone();
589 test.net
590 .as_mut()
591 .unwrap()
592 .try_set_next_headers(ip_number::ICMP)
593 .unwrap();
594 test.transport = Some(TransportHeader::Icmpv4(icmpv4.clone()));
595 // resize the payload in case it does not have to be as big
596 test.payload.resize(payload_size, 0);
597 test.run()
598 } else {
599 let mut test = self.clone();
600 test.net
601 .as_mut()
602 .unwrap()
603 .try_set_next_headers(ip_number::ICMP)
604 .unwrap();
605 test.transport = Some(TransportHeader::Icmpv4(icmpv4.clone()));
606 test.run()
607 }
608
609 // icmpv6
610 if let Some(payload_size) = icmpv6.fixed_payload_size() {
611 let mut test = self.clone();
612 test.net
613 .as_mut()
614 .unwrap()
615 .try_set_next_headers(ip_number::IPV6_ICMP)
616 .unwrap();
617 test.transport = Some(TransportHeader::Icmpv6(icmpv6.clone()));
618 // resize the payload in case it does not have to be as big
619 test.payload.resize(payload_size, 0);
620 test.run()
621 } else {
622 let mut test = self.clone();
623 test.net
624 .as_mut()
625 .unwrap()
626 .try_set_next_headers(ip_number::IPV6_ICMP)
627 .unwrap();
628 test.transport = Some(TransportHeader::Icmpv6(icmpv6.clone()));
629 test.run()
630 }
631 }
632 }
633
proptest! {
    ///Test that all known packet compositions are parsed correctly.
    #[test]
    #[cfg_attr(miri, ignore)] // vec allocation reduces miri runspeed too much
    fn test_compositions(ref eth in ethernet_2_unknown(),
                         ref vlan_outer in vlan_single_unknown(),
                         ref vlan_inner in vlan_single_unknown(),
                         ref ipv4 in ipv4_unknown(),
                         ref ipv4_exts in ipv4_extensions_unknown(),
                         ref ipv6 in ipv6_unknown(),
                         ref ipv6_exts in ipv6_extensions_unknown(),
                         ref arp in arp_packet_any(),
                         ref udp in udp_any(),
                         ref tcp in tcp_any(),
                         ref icmpv4 in icmpv4_header_any(),
                         ref icmpv6 in icmpv6_header_any(),
                         ref payload in proptest::collection::vec(any::<u8>(), 0..1024))
    {
        // helper building a test with only an ethernet2 header
        // (using the given ether type) and the generated payload
        let setup_eth = | ether_type: EtherType | -> ComponentTest {
            ComponentTest {
                payload: payload.clone(),
                link: Some({
                    let mut result = eth.clone();
                    result.ether_type = ether_type;
                    LinkHeader::Ethernet2(result)
                }),
                vlan: None,
                net: None,
                transport: None
            }
        };

        //ethernet 2: standalone, ipv4, ipv6
        setup_eth(eth.ether_type).run();
        setup_eth(ether_type::ARP).run_arp(arp);
        setup_eth(ether_type::IPV4).run_ipv4(ipv4, ipv4_exts, udp, tcp, icmpv4, icmpv6);
        setup_eth(ether_type::IPV6).run_ipv6(ipv6, ipv6_exts, udp, tcp, icmpv4, icmpv6);

        //vlans
        for ether_type in VLAN_ETHER_TYPES {
            setup_eth(*ether_type).run_vlan(vlan_outer, vlan_inner, arp, ipv4, ipv4_exts, ipv6, ipv6_exts, udp, tcp, icmpv4, icmpv6);
        }
    }
}
678
/// Test that assert_sliced_packet is panicking when the ethernet header is
/// expected but missing from the sliced result.
#[test]
#[should_panic]
fn test_packet_slicing_panics() {
    // a sliced packet without any decoded layers
    let empty_slices = SlicedPacket {
        link: None,
        vlan: None,
        net: None,
        transport: None,
    };
    // a test expecting an ethernet2 link header -> comparison must panic
    let expectation = ComponentTest {
        link: Some(LinkHeader::Ethernet2(Ethernet2Header {
            source: [0; 6],
            destination: [0; 6],
            ether_type: 0.into(),
        })),
        vlan: None,
        net: None,
        transport: None,
        payload: vec![],
    };
    expectation.assert_sliced_packet(empty_slices);
}
702