1 #![allow(clippy::let_unit_value)]
2 #![warn(clippy::absolute_paths)]
3
4 mod common;
5
6 use std::collections::HashSet;
7 use std::env::current_exe;
8 use std::ffi::c_int;
9 use std::ffi::c_void;
10 use std::ffi::OsStr;
11 use std::fs;
12 use std::hint;
13 use std::io;
14 use std::io::Read;
15 use std::mem::size_of;
16 use std::mem::size_of_val;
17 use std::os::unix::io::AsFd;
18 use std::path::Path;
19 use std::path::PathBuf;
20 use std::ptr;
21 use std::ptr::addr_of;
22 use std::slice;
23 use std::sync::atomic::AtomicI32;
24 use std::sync::atomic::Ordering;
25 use std::sync::mpsc::channel;
26 use std::time::Duration;
27
28 use libbpf_rs::num_possible_cpus;
29 use libbpf_rs::AsRawLibbpf;
30 use libbpf_rs::Iter;
31 use libbpf_rs::Linker;
32 use libbpf_rs::MapCore;
33 use libbpf_rs::MapFlags;
34 use libbpf_rs::MapHandle;
35 use libbpf_rs::MapInfo;
36 use libbpf_rs::MapType;
37 use libbpf_rs::Object;
38 use libbpf_rs::ObjectBuilder;
39 use libbpf_rs::Program;
40 use libbpf_rs::ProgramInput;
41 use libbpf_rs::ProgramType;
42 use libbpf_rs::TracepointOpts;
43 use libbpf_rs::UprobeOpts;
44 use libbpf_rs::UsdtOpts;
45 use libbpf_rs::UserRingBuffer;
46 use plain::Plain;
47 use probe::probe;
48 use scopeguard::defer;
49 use tempfile::NamedTempFile;
50 use test_tag::tag;
51
52 use crate::common::bump_rlimit_mlock;
53 use crate::common::get_map;
54 use crate::common::get_map_mut;
55 use crate::common::get_prog_mut;
56 use crate::common::get_test_object;
57 use crate::common::get_test_object_path;
58 use crate::common::open_test_object;
59 use crate::common::with_ringbuffer;
60
61
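/// Check that we can open and load a BPF object file.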
62 #[tag(root)]
63 #[test]
64 fn test_object_build_and_load() {
65 bump_rlimit_mlock();
66
67 get_test_object("runqslower.bpf.o");
68 }
69
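/// Check that we can open a BPF object from a memory buffer and name it.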
70 #[test]
71 fn test_object_build_from_memory() {
72 let obj_path = get_test_object_path("runqslower.bpf.o");
73 let contents = fs::read(obj_path).expect("failed to read object file");
74 let mut builder = ObjectBuilder::default();
75 let obj = builder
76 .name("memory name")
77 .unwrap()
78 .open_memory(&contents)
79 .expect("failed to build object");
80 let name = obj.name().expect("failed to get object name");
81 assert!(name == "memory name");
82
83 let obj = unsafe { Object::from_ptr(obj.take_ptr()) };
84 let name = obj.name().expect("failed to get object name");
85 assert!(name == "memory name");
86 }
87
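/// Check that an empty name is accepted when opening an object from memory.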
88 #[test]
89 fn test_object_build_from_memory_empty_name() {
90 let obj_path = get_test_object_path("runqslower.bpf.o");
91 let contents = fs::read(obj_path).expect("failed to read object file");
92 let mut builder = ObjectBuilder::default();
93 let obj = builder
94 .name("")
95 .unwrap()
96 .open_memory(&contents)
97 .expect("failed to build object");
98 let name = obj.name().expect("failed to get object name");
99 assert!(name.is_empty());
100
101 let obj = unsafe { Object::from_ptr(obj.take_ptr()) };
102 let name = obj.name().expect("failed to get object name");
103 assert!(name.is_empty());
104 }
105
106 /// Check that loading an object from an empty file fails as expected.
107 #[tag(root)]
108 #[test]
109 fn test_object_load_invalid() {
110 let empty_file = NamedTempFile::new().unwrap();
111 let _err = ObjectBuilder::default()
112 .debug(true)
113 .open_file(empty_file.path())
114 .unwrap_err();
115 }
116
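/// Check that the name set on the builder is reflected on the opened object.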
117 #[test]
118 fn test_object_name() {
119 let obj_path = get_test_object_path("runqslower.bpf.o");
120 let mut builder = ObjectBuilder::default();
121 builder.name("test name").unwrap();
122 let obj = builder.open_file(obj_path).expect("failed to build object");
123 let obj_name = obj.name().expect("failed to get object name");
124 assert!(obj_name == "test name");
125 }
126
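/// Check that we can look up maps by name on a loaded object.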
127 #[tag(root)]
128 #[test]
129 fn test_object_maps() {
130 bump_rlimit_mlock();
131
132 let mut obj = get_test_object("runqslower.bpf.o");
133 let _map = get_map_mut(&mut obj, "start");
134 let _map = get_map_mut(&mut obj, "events");
135 assert!(!obj.maps().any(|map| map.name() == OsStr::new("asdf")));
136 }
137
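/// Check that iterating over an object's maps yields the expected count.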
138 #[tag(root)]
139 #[test]
140 fn test_object_maps_iter() {
141 bump_rlimit_mlock();
142
143 let obj = get_test_object("runqslower.bpf.o");
144 for map in obj.maps() {
145 eprintln!("{:?}", map.name());
146 }
147 // This will include .rodata and .bss, so our expected count is 4, not 2
148 assert!(obj.maps().count() == 4);
149 }
150
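/// Check that map operations with mismatched key or value sizes fail.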
151 #[tag(root)]
152 #[test]
153 fn test_object_map_key_value_size() {
154 bump_rlimit_mlock();
155
156 let mut obj = get_test_object("runqslower.bpf.o");
157 let start = get_map_mut(&mut obj, "start");
158
159 assert!(start.lookup(&[1, 2, 3, 4, 5], MapFlags::empty()).is_err());
160 assert!(start.delete(&[1]).is_err());
161 assert!(start.lookup_and_delete(&[1, 2, 3, 4, 5]).is_err());
162 assert!(start
163 .update(&[1, 2, 3, 4, 5], &[1], MapFlags::empty())
164 .is_err());
165 }
166
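/// Check batch updates with valid and invalid key/value buffers and counts.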
167 #[tag(root)]
168 #[test]
169 fn test_object_map_update_batch() {
170 bump_rlimit_mlock();
171
172 let mut obj = get_test_object("runqslower.bpf.o");
173 let start = get_map_mut(&mut obj, "start");
174
175 let key1 = 1u32.to_ne_bytes();
176 let key2 = 2u32.to_ne_bytes();
177 let key3 = 3u32.to_ne_bytes();
178 let key4 = 4u32.to_ne_bytes();
179
180 let value1 = 369u64.to_ne_bytes();
181 let value2 = 258u64.to_ne_bytes();
182 let value3 = 147u64.to_ne_bytes();
183 let value4 = 159u64.to_ne_bytes();
184
185 let batch_key1 = key1.into_iter().chain(key2).collect::<Vec<_>>();
186 let batch_value1 = value1.into_iter().chain(value2).collect::<Vec<_>>();
187
188 let batch_key2 = key2.into_iter().chain(key3).chain(key4).collect::<Vec<_>>();
189 let batch_value2 = value2
190 .into_iter()
191 .chain(value3)
192 .chain(value4)
193 .collect::<Vec<_>>();
194
195 // Update batch with wrong key size
196 assert!(start
197 .update_batch(
198 &[1, 2, 3],
199 &batch_value1,
200 2,
201 MapFlags::ANY,
202 MapFlags::NO_EXIST
203 )
204 .is_err());
205
206 // Update batch with wrong value size
207 assert!(start
208 .update_batch(
209 &batch_key1,
210 &[1, 2, 3],
211 2,
212 MapFlags::ANY,
213 MapFlags::NO_EXIST
214 )
215 .is_err());
216
217 // Update batch with wrong count.
218 assert!(start
219 .update_batch(
220 &batch_key1,
221 &batch_value1,
222 1,
223 MapFlags::ANY,
224 MapFlags::NO_EXIST
225 )
226 .is_err());
227
228 // Update batch with 1 key.
229 assert!(start
230 .update_batch(&key1, &value1, 1, MapFlags::ANY, MapFlags::NO_EXIST)
231 .is_ok());
232
233 // Update batch with multiple keys.
234 assert!(start
235 .update_batch(
236 &batch_key2,
237 &batch_value2,
238 3,
239 MapFlags::ANY,
240 MapFlags::NO_EXIST
241 )
242 .is_ok());
243
244 // Update batch with existing keys.
245 assert!(start
246 .update_batch(
247 &batch_key2,
248 &batch_value2,
249 3,
250 MapFlags::NO_EXIST,
251 MapFlags::NO_EXIST
252 )
253 .is_err());
254 }
255
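/// Check batch deletion of map entries.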
256 #[tag(root)]
257 #[test]
258 fn test_object_map_delete_batch() {
259 bump_rlimit_mlock();
260
261 let mut obj = get_test_object("runqslower.bpf.o");
262 let start = get_map_mut(&mut obj, "start");
263
264 let key1 = 1u32.to_ne_bytes();
265 assert!(start
266 .update(&key1, &9999u64.to_ne_bytes(), MapFlags::ANY)
267 .is_ok());
268 let key2 = 2u32.to_ne_bytes();
269 assert!(start
270 .update(&key2, &42u64.to_ne_bytes(), MapFlags::ANY)
271 .is_ok());
272 let key3 = 3u32.to_ne_bytes();
273 assert!(start
274 .update(&key3, &18u64.to_ne_bytes(), MapFlags::ANY)
275 .is_ok());
276 let key4 = 4u32.to_ne_bytes();
277 assert!(start
278 .update(&key4, &1337u64.to_ne_bytes(), MapFlags::ANY)
279 .is_ok());
280
281 // Delete 1 incomplete key.
282 assert!(start
283 .delete_batch(&[0, 0, 1], 1, MapFlags::empty(), MapFlags::empty())
284 .is_err());
285 // Delete keys with wrong count.
286 assert!(start
287 .delete_batch(&key4, 2, MapFlags::empty(), MapFlags::empty())
288 .is_err());
289 // Delete 1 key successfully.
290 assert!(start
291 .delete_batch(&key4, 1, MapFlags::empty(), MapFlags::empty())
292 .is_ok());
293 // Delete remaining 3 keys.
294 let keys = key1.into_iter().chain(key2).chain(key3).collect::<Vec<_>>();
295 assert!(start
296 .delete_batch(&keys, 3, MapFlags::empty(), MapFlags::empty())
297 .is_ok());
298 // Map should be empty now.
299 assert!(start.keys().collect::<Vec<_>>().is_empty())
300 }
301
302 /// Test whether `MapInfo` works properly
303 #[tag(root)]
304 #[test]
305 pub fn test_map_info() {
306 #[allow(clippy::needless_update)]
307 let opts = libbpf_sys::bpf_map_create_opts {
308 sz: size_of::<libbpf_sys::bpf_map_create_opts>() as libbpf_sys::size_t,
309 map_flags: libbpf_sys::BPF_ANY,
310 btf_fd: 0,
311 btf_key_type_id: 0,
312 btf_value_type_id: 0,
313 btf_vmlinux_value_type_id: 0,
314 inner_map_fd: 0,
315 map_extra: 0,
316 numa_node: 0,
317 map_ifindex: 0,
318 // bpf_map_create_opts might have padding fields on some platforms
319 ..Default::default()
320 };
321
322 let map = MapHandle::create(MapType::Hash, Some("simple_map"), 8, 64, 1024, &opts).unwrap();
323 let map_info = MapInfo::new(map.as_fd()).unwrap();
324 let name_received = map_info.name().unwrap();
325 assert_eq!(name_received, "simple_map");
326 assert_eq!(map_info.map_type(), MapType::Hash);
327 assert_eq!(map_info.flags() & MapFlags::ANY, MapFlags::ANY);
328
329 let map_info = &map_info.info;
330 assert_eq!(map_info.key_size, 8);
331 assert_eq!(map_info.value_size, 64);
332 assert_eq!(map_info.max_entries, 1024);
333 assert_eq!(map_info.btf_id, 0);
334 assert_eq!(map_info.btf_key_type_id, 0);
335 assert_eq!(map_info.btf_value_type_id, 0);
336 assert_eq!(map_info.btf_vmlinux_value_type_id, 0);
337 assert_eq!(map_info.map_extra, 0);
338 assert_eq!(map_info.ifindex, 0);
339 }
340
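/// Check that a per-CPU map lookup returns one value per possible CPU.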
341 #[tag(root)]
342 #[test]
343 fn test_object_percpu_lookup() {
344 bump_rlimit_mlock();
345
346 let mut obj = get_test_object("percpu_map.bpf.o");
347 let map = get_map_mut(&mut obj, "percpu_map");
348 let res = map
349 .lookup_percpu(&(0_u32).to_ne_bytes(), MapFlags::ANY)
350 .expect("failed to lookup")
351 .expect("failed to find value for key");
352
353 assert_eq!(
354 res.len(),
355 num_possible_cpus().expect("must be one value per cpu")
356 );
357 assert_eq!(res[0].len(), size_of::<u32>());
358 }
359
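/// Check that the regular lookup function fails on a per-CPU map.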
360 #[tag(root)]
361 #[test]
362 fn test_object_percpu_invalid_lookup_fn() {
363 bump_rlimit_mlock();
364
365 let mut obj = get_test_object("percpu_map.bpf.o");
366 let map = get_map_mut(&mut obj, "percpu_map");
367
368 assert!(map.lookup(&(0_u32).to_ne_bytes(), MapFlags::ANY).is_err());
369 }
370
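/// Check that we can update and read back per-CPU map values.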
371 #[tag(root)]
372 #[test]
373 fn test_object_percpu_update() {
374 bump_rlimit_mlock();
375
376 let mut obj = get_test_object("percpu_map.bpf.o");
377 let map = get_map_mut(&mut obj, "percpu_map");
378 let key = (0_u32).to_ne_bytes();
379
380 let mut vals: Vec<Vec<u8>> = Vec::new();
381 for i in 0..num_possible_cpus().unwrap() {
382 vals.push((i as u32).to_ne_bytes().to_vec());
383 }
384
385 map.update_percpu(&key, &vals, MapFlags::ANY)
386 .expect("failed to update map");
387
388 let res = map
389 .lookup_percpu(&key, MapFlags::ANY)
390 .expect("failed to lookup")
391 .expect("failed to find value for key");
392
393 assert_eq!(vals, res);
394 }
395
396 #[tag(root)]
397 #[test]
398 fn test_object_percpu_invalid_update_fn() {
399 bump_rlimit_mlock();
400
401 let mut obj = get_test_object("percpu_map.bpf.o");
402 let map = get_map_mut(&mut obj, "percpu_map");
403 let key = (0_u32).to_ne_bytes();
404
405 let val = (1_u32).to_ne_bytes().to_vec();
406
407 assert!(map.update(&key, &val, MapFlags::ANY).is_err());
408 }
409
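/// Check that per-CPU values can be read, modified, and written back.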
410 #[tag(root)]
411 #[test]
412 fn test_object_percpu_lookup_update() {
413 bump_rlimit_mlock();
414
415 let mut obj = get_test_object("percpu_map.bpf.o");
416 let map = get_map_mut(&mut obj, "percpu_map");
417 let key = (0_u32).to_ne_bytes();
418
419 let mut res = map
420 .lookup_percpu(&key, MapFlags::ANY)
421 .expect("failed to lookup")
422 .expect("failed to find value for key");
423
424 for e in res.iter_mut() {
425 e[0] &= 0xf0;
426 }
427
428 map.update_percpu(&key, &res, MapFlags::ANY)
429 .expect("failed to update after first lookup");
430
431 let res2 = map
432 .lookup_percpu(&key, MapFlags::ANY)
433 .expect("failed to lookup")
434 .expect("failed to find value for key");
435
436 assert_eq!(res, res2);
437 }
438
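/// Check that looking up a key in an empty map returns `None`.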
439 #[tag(root)]
440 #[test]
441 fn test_object_map_empty_lookup() {
442 bump_rlimit_mlock();
443
444 let mut obj = get_test_object("runqslower.bpf.o");
445 let start = get_map_mut(&mut obj, "start");
446
447 assert!(start
448 .lookup(&[1, 2, 3, 4], MapFlags::empty())
449 .expect("err in map lookup")
450 .is_none());
451 }
452
453 /// Test CRUD operations on map of type queue.
454 #[tag(root)]
455 #[test]
456 fn test_object_map_queue_crud() {
457 bump_rlimit_mlock();
458
459 let mut obj = get_test_object("tracepoint.bpf.o");
460 let queue = get_map_mut(&mut obj, "queue");
461
462 let key: [u8; 0] = [];
463 let value1 = 42u32.to_ne_bytes();
464 let value2 = 43u32.to_ne_bytes();
465
466 // Test queue, FIFO expected
467 queue
468 .update(&key, &value1, MapFlags::ANY)
469 .expect("failed to update in queue");
470 queue
471 .update(&key, &value2, MapFlags::ANY)
472 .expect("failed to update in queue");
473
474 let mut val = queue
475 .lookup(&key, MapFlags::ANY)
476 .expect("failed to peek the queue")
477 .expect("failed to retrieve value");
478 assert_eq!(val.len(), 4);
479 assert_eq!(&val, &value1);
480
481 val = queue
482 .lookup_and_delete(&key)
483 .expect("failed to pop from queue")
484 .expect("failed to retrieve value");
485 assert_eq!(val.len(), 4);
486 assert_eq!(&val, &value1);
487
488 val = queue
489 .lookup_and_delete(&key)
490 .expect("failed to pop from queue")
491 .expect("failed to retrieve value");
492 assert_eq!(val.len(), 4);
493 assert_eq!(&val, &value2);
494
495 assert!(queue
496 .lookup_and_delete(&key)
497 .expect("failed to pop from queue")
498 .is_none());
499 }
500
501 /// Test CRUD operations on map of type bloomfilter.
502 #[tag(root)]
503 #[test]
504 fn test_object_map_bloom_filter_crud() {
505 bump_rlimit_mlock();
506
507 let mut obj = get_test_object("tracepoint.bpf.o");
508 let bloom_filter = get_map_mut(&mut obj, "bloom_filter");
509
510 let key: [u8; 0] = [];
511 let value1 = 1337u32.to_ne_bytes();
512 let value2 = 2674u32.to_ne_bytes();
513
514 bloom_filter
515 .update(&key, &value1, MapFlags::ANY)
516 .expect("failed to add entry value1 to bloom filter");
517
518 bloom_filter
519 .update(&key, &value2, MapFlags::ANY)
520 .expect("failed to add entry value2 in bloom filter");
521
522 // Non-empty keys should result in an error
523 bloom_filter
524 .update(&value1, &value1, MapFlags::ANY)
525 .expect_err("Non empty key should return an error");
526
527 for inserted_value in [value1, value2] {
528 let val = bloom_filter
529 .lookup_bloom_filter(&inserted_value)
530 .expect("failed retrieve item from bloom filter");
531
532 assert!(val);
533 }
534 // Test a non-existing element
535 let enoent_found = bloom_filter
536 .lookup_bloom_filter(&[1, 2, 3, 4])
537 .expect("failed retrieve item from bloom filter");
538
539 assert!(!enoent_found);
540
541 // Calling lookup should result in an error
542 bloom_filter
543 .lookup(&[1, 2, 3, 4], MapFlags::ANY)
544 .expect_err("lookup should fail since we should use lookup_bloom_filter");
545
546 // Deleting should not be possible
547 bloom_filter
548 .lookup_and_delete(&key)
549 .expect_err("Expect delete to fail");
550 }
551
552 /// Test CRUD operations on map of type stack.
553 #[tag(root)]
554 #[test]
555 fn test_object_map_stack_crud() {
556 bump_rlimit_mlock();
557
558 let mut obj = get_test_object("tracepoint.bpf.o");
559 let stack = get_map_mut(&mut obj, "stack");
560
561 let key: [u8; 0] = [];
562 let value1 = 1337u32.to_ne_bytes();
563 let value2 = 2674u32.to_ne_bytes();
564
565 stack
566 .update(&key, &value1, MapFlags::ANY)
567 .expect("failed to update in stack");
568 stack
569 .update(&key, &value2, MapFlags::ANY)
570 .expect("failed to update in stack");
571
572 let mut val = stack
573 .lookup(&key, MapFlags::ANY)
574 .expect("failed to pop from stack")
575 .expect("failed to retrieve value");
576
577 assert_eq!(val.len(), 4);
578 assert_eq!(&val, &value2);
579
580 val = stack
581 .lookup_and_delete(&key)
582 .expect("failed to pop from stack")
583 .expect("failed to retrieve value");
584 assert_eq!(val.len(), 4);
585 assert_eq!(&val, &value2);
586
587 val = stack
588 .lookup_and_delete(&key)
589 .expect("failed to pop from stack")
590 .expect("failed to retrieve value");
591 assert_eq!(val.len(), 4);
592 assert_eq!(&val, &value1);
593
594 assert!(stack
595 .lookup_and_delete(&key)
596 .expect("failed to pop from stack")
597 .is_none());
598 }
599
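/// Check basic update, lookup, and delete operations on a hash map.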
600 #[tag(root)]
601 #[test]
602 fn test_object_map_mutation() {
603 bump_rlimit_mlock();
604
605 let mut obj = get_test_object("runqslower.bpf.o");
606 let start = get_map_mut(&mut obj, "start");
607 start
608 .update(&[1, 2, 3, 4], &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty())
609 .expect("failed to write");
610 let val = start
611 .lookup(&[1, 2, 3, 4], MapFlags::empty())
612 .expect("failed to read map")
613 .expect("failed to find key");
614 assert_eq!(val.len(), 8);
615 assert_eq!(val, &[1, 2, 3, 4, 5, 6, 7, 8]);
616
617 start.delete(&[1, 2, 3, 4]).expect("failed to delete key");
618
619 assert!(start
620 .lookup(&[1, 2, 3, 4], MapFlags::empty())
621 .expect("failed to read map")
622 .is_none());
623 }
624
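/// Check that `MapFlags::NO_EXIST` prevents overwriting an existing entry.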
625 #[tag(root)]
626 #[test]
627 fn test_object_map_lookup_flags() {
628 bump_rlimit_mlock();
629
630 let mut obj = get_test_object("runqslower.bpf.o");
631 let start = get_map_mut(&mut obj, "start");
632 start
633 .update(&[1, 2, 3, 4], &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::NO_EXIST)
634 .expect("failed to write");
635 assert!(start
636 .update(&[1, 2, 3, 4], &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::NO_EXIST)
637 .is_err());
638 }
639
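/// Check that iterating over map keys yields all inserted keys.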
640 #[tag(root)]
641 #[test]
642 fn test_object_map_key_iter() {
643 bump_rlimit_mlock();
644
645 let mut obj = get_test_object("runqslower.bpf.o");
646 let start = get_map_mut(&mut obj, "start");
647
648 let key1 = vec![1, 2, 3, 4];
649 let key2 = vec![1, 2, 3, 5];
650 let key3 = vec![1, 2, 3, 6];
651
652 start
653 .update(&key1, &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty())
654 .expect("failed to write");
655 start
656 .update(&key2, &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty())
657 .expect("failed to write");
658 start
659 .update(&key3, &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty())
660 .expect("failed to write");
661
662 let mut keys = HashSet::new();
663 for key in start.keys() {
664 keys.insert(key);
665 }
666 assert_eq!(keys.len(), 3);
667 assert!(keys.contains(&key1));
668 assert!(keys.contains(&key2));
669 assert!(keys.contains(&key3));
670 }
671
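/// Check that iterating over the keys of an empty map yields nothing.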
672 #[tag(root)]
673 #[test]
674 fn test_object_map_key_iter_empty() {
675 bump_rlimit_mlock();
676
677 let mut obj = get_test_object("runqslower.bpf.o");
678 let start = get_map_mut(&mut obj, "start");
679 let mut count = 0;
680 for _ in start.keys() {
681 count += 1;
682 }
683 assert_eq!(count, 0);
684 }
685
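/// Check that we can pin and unpin a map in bpffs.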
686 #[tag(root)]
687 #[test]
688 fn test_object_map_pin() {
689 bump_rlimit_mlock();
690
691 let mut obj = get_test_object("runqslower.bpf.o");
692 let mut map = get_map_mut(&mut obj, "start");
693 let path = "/sys/fs/bpf/mymap_test_object_map_pin";
694
695 // Unpinning an unpinned map should be an error
696 assert!(map.unpin(path).is_err());
697 assert!(!Path::new(path).exists());
698
699 // Pin and unpin should be successful
700 map.pin(path).expect("failed to pin map");
701 assert!(Path::new(path).exists());
702 map.unpin(path).expect("failed to unpin map");
703 assert!(!Path::new(path).exists());
704 }
705
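/// Check that a pinned map can be reopened from its bpffs path.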
706 #[tag(root)]
707 #[test]
708 fn test_object_loading_pinned_map_from_path() {
709 bump_rlimit_mlock();
710
711 let mut obj = get_test_object("runqslower.bpf.o");
712 let mut map = get_map_mut(&mut obj, "start");
713 let path = "/sys/fs/bpf/mymap_test_pin_to_load_from_path";
714
715 map.pin(path).expect("pinning map failed");
716
717 let pinned_map = MapHandle::from_pinned_path(path).expect("loading a map from a path failed");
718 map.unpin(path).expect("unpinning map failed");
719
720 assert_eq!(map.name(), pinned_map.name());
721 assert_eq!(
722 map.info().unwrap().info.id,
723 pinned_map.info().unwrap().info.id
724 );
725 }
726
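/// Check that a pinned program's fd can be retrieved from its pin path.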
727 #[tag(root)]
728 #[test]
729 fn test_program_loading_fd_from_pinned_path() {
730 bump_rlimit_mlock();
731
732 let path = "/sys/fs/bpf/myprog_test_pin_to_load_from_path";
733 let prog_name = "handle__sched_switch";
734
735 let mut obj = get_test_object("runqslower.bpf.o");
736 let mut prog = get_prog_mut(&mut obj, prog_name);
737 prog.pin(path).expect("pinning prog failed");
738 let prog_id = Program::id_from_fd(prog.as_fd()).expect("failed to determine prog id");
739
740 let pinned_prog_fd =
741 Program::fd_from_pinned_path(path).expect("failed to get fd of pinned prog");
742 let pinned_prog_id =
743 Program::id_from_fd(pinned_prog_fd.as_fd()).expect("failed to determine pinned prog id");
744
745 assert_eq!(prog_id, pinned_prog_id);
746
747 prog.unpin(path).expect("unpinning program failed");
748 }
749
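/// Check that retrieving a program fd from a path pinned to a map fails.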
750 #[tag(root)]
751 #[test]
752 fn test_program_loading_fd_from_pinned_path_with_wrong_pin_type() {
753 bump_rlimit_mlock();
754
755 let path = "/sys/fs/bpf/mymap_test_pin_to_load_from_path";
756 let map_name = "events";
757
758 let mut obj = get_test_object("runqslower.bpf.o");
759 let mut map = get_map_mut(&mut obj, map_name);
760 map.pin(path).expect("pinning map failed");
761
762 // Must fail, as the pinned path points to a map, not a program.
763 let _ = Program::fd_from_pinned_path(path).expect_err("program fd obtained from pinned map");
764
765 map.unpin(path).expect("unpinning program failed");
766 }
767
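/// Check that a loaded map can be opened by its id.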
768 #[tag(root)]
769 #[test]
770 fn test_object_loading_loaded_map_from_id() {
771 bump_rlimit_mlock();
772
773 let mut obj = get_test_object("runqslower.bpf.o");
774 let map = get_map_mut(&mut obj, "start");
775 let id = map.info().expect("to get info from map 'start'").info.id;
776
777 let map_by_id = MapHandle::from_map_id(id).expect("map to load from id");
778
779 assert_eq!(map.name(), map_by_id.name());
780 assert_eq!(
781 map.info().unwrap().info.id,
782 map_by_id.info().unwrap().info.id
783 );
784 }
785
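/// Check that we can look up programs by name on a loaded object.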
786 #[tag(root)]
787 #[test]
788 fn test_object_programs() {
789 bump_rlimit_mlock();
790
791 let mut obj = get_test_object("runqslower.bpf.o");
792 let _prog = get_prog_mut(&mut obj, "handle__sched_wakeup");
793 let _prog = get_prog_mut(&mut obj, "handle__sched_wakeup_new");
794 let _prog = get_prog_mut(&mut obj, "handle__sched_switch");
795 assert!(!obj.progs().any(|prog| prog.name() == OsStr::new("asdf")));
796 }
797
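/// Check that iterating over an object's programs yields the expected count.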
798 #[tag(root)]
799 #[test]
800 fn test_object_programs_iter_mut() {
801 bump_rlimit_mlock();
802
803 let obj = get_test_object("runqslower.bpf.o");
804 assert!(obj.progs().count() == 3);
805 }
806
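/// Check that we can pin and unpin a program in bpffs.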
807 #[tag(root)]
808 #[test]
809 fn test_object_program_pin() {
810 bump_rlimit_mlock();
811
812 let mut obj = get_test_object("runqslower.bpf.o");
813 let mut prog = get_prog_mut(&mut obj, "handle__sched_wakeup");
814 let path = "/sys/fs/bpf/myprog";
815
816 // Unpinning an unpinned prog should be an error
817 assert!(prog.unpin(path).is_err());
818 assert!(!Path::new(path).exists());
819
820 // Pin should be successful
821 prog.pin(path).expect("failed to pin prog");
822 assert!(Path::new(path).exists());
823
824 // Backup cleanup method in case test errors
825 defer! {
826 let _ = fs::remove_file(path);
827 }
828
829 // Unpin should be successful
830 prog.unpin(path).expect("failed to unpin prog");
831 assert!(!Path::new(path).exists());
832 }
833
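/// Check that we can pin and unpin a link in bpffs.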
834 #[tag(root)]
835 #[test]
836 fn test_object_link_pin() {
837 bump_rlimit_mlock();
838
839 let mut obj = get_test_object("runqslower.bpf.o");
840 let prog = get_prog_mut(&mut obj, "handle__sched_wakeup");
841 let mut link = prog.attach().expect("failed to attach prog");
842
843 let path = "/sys/fs/bpf/mylink";
844
845 // Unpinning an unpinned link should be an error
846 assert!(link.unpin().is_err());
847 assert!(!Path::new(path).exists());
848
849 // Pin should be successful
850 link.pin(path).expect("failed to pin link");
851 assert!(Path::new(path).exists());
852
853 // Backup cleanup method in case test errors
854 defer! {
855 let _ = fs::remove_file(path);
856 }
857
858 // Unpin should be successful
859 link.unpin().expect("failed to unpin link");
860 assert!(!Path::new(path).exists());
861 }
862
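/// Check that a newly opened object can reuse an already pinned map.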
863 #[tag(root)]
864 #[test]
865 fn test_object_reuse_pined_map() {
866 bump_rlimit_mlock();
867
868 let path = "/sys/fs/bpf/mymap_test_object_reuse_pined_map";
869 let key = vec![1, 2, 3, 4];
870 let val = vec![1, 2, 3, 4, 5, 6, 7, 8];
871
872 // Pin a map
873 {
874 let mut obj = get_test_object("runqslower.bpf.o");
875 let mut map = get_map_mut(&mut obj, "start");
876 map.update(&key, &val, MapFlags::empty())
877 .expect("failed to write");
878
879 // Pin map
880 map.pin(path).expect("failed to pin map");
881 assert!(Path::new(path).exists());
882 }
883
884 // Backup cleanup method in case test errors somewhere
885 defer! {
886 let _ = fs::remove_file(path);
887 }
888
889 // Reuse the pinned map
890 let obj_path = get_test_object_path("runqslower.bpf.o");
891 let mut builder = ObjectBuilder::default();
892 builder.debug(true);
893 let mut open_obj = builder.open_file(obj_path).expect("failed to open object");
894 let mut start = open_obj
895 .maps_mut()
896 .find(|map| map.name() == OsStr::new("start"))
897 .expect("failed to find `start` map");
898 assert!(start.reuse_pinned_map("/asdf").is_err());
899 start.reuse_pinned_map(path).expect("failed to reuse map");
900
901 let mut obj = open_obj.load().expect("failed to load object");
902 let mut reused_map = get_map_mut(&mut obj, "start");
903 let found_val = reused_map
904 .lookup(&key, MapFlags::empty())
905 .expect("failed to read map")
906 .expect("failed to find key");
907 assert_eq!(&found_val, &val);
908
909 // Cleanup
910 reused_map.unpin(path).expect("failed to unpin map");
911 assert!(!Path::new(path).exists());
912 }
913
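/// Check consume_raw() and poll_raw() on ring buffers backed by multiple maps.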
914 #[tag(root)]
915 #[test]
916 fn test_object_ringbuf_raw() {
917 bump_rlimit_mlock();
918
919 let mut obj = get_test_object("ringbuf.bpf.o");
920 let prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
921 let _link = prog.attach().expect("failed to attach prog");
922
923 static V1: AtomicI32 = AtomicI32::new(0);
924 static V2: AtomicI32 = AtomicI32::new(0);
925
926 fn callback1(data: &[u8]) -> i32 {
927 let mut value: i32 = 0;
928 plain::copy_from_bytes(&mut value, data).expect("Wrong size");
929
930 V1.store(value, Ordering::SeqCst);
931 0
932 }
933
934 fn callback2(data: &[u8]) -> i32 {
935 let mut value: i32 = 0;
936 plain::copy_from_bytes(&mut value, data).expect("Wrong size");
937
938 V2.store(value, Ordering::SeqCst);
939 0
940 }
941
942 // Test trying to build without adding any ringbufs
943 // Can't use expect_err here since RingBuffer does not implement Debug
944 let builder = libbpf_rs::RingBufferBuilder::new();
945 assert!(
946 builder.build().is_err(),
947 "Should not be able to build without adding at least one ringbuf"
948 );
949
950 // Test building with multiple map objects
951 let mut builder = libbpf_rs::RingBufferBuilder::new();
952
953 // Add a first map and callback
954 let map1 = get_map(&obj, "ringbuf1");
955 builder
956 .add(&map1, callback1)
957 .expect("failed to add ringbuf");
958
959 // Add a second map and callback
960 let map2 = get_map(&obj, "ringbuf2");
961 builder
962 .add(&map2, callback2)
963 .expect("failed to add ringbuf");
964
965 let mgr = builder.build().expect("failed to build");
966
967 // Call getpid to ensure the BPF program runs
968 unsafe { libc::getpid() };
969
970 // Test raw primitives
971 let ret = mgr.consume_raw();
972
973 // We can't check for exact return values, since other tasks in the system may call getpid(),
974 // triggering the BPF program
975 assert!(ret >= 2);
976
977 assert_eq!(V1.load(Ordering::SeqCst), 1);
978 assert_eq!(V2.load(Ordering::SeqCst), 2);
979
980 // Consume from a (potentially) empty ring buffer
981 let ret = mgr.consume_raw();
982 assert!(ret >= 0);
983
984 // Consume from a (potentially) empty ring buffer using poll()
985 let ret = mgr.poll_raw(Duration::from_millis(100));
986 assert!(ret >= 0);
987 }
988
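/// Check that errors returned from ring buffer callbacks are propagated.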
989 #[tag(root)]
990 #[test]
991 fn test_object_ringbuf_err_callback() {
992 bump_rlimit_mlock();
993
994 let mut obj = get_test_object("ringbuf.bpf.o");
995 let prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
996 let _link = prog.attach().expect("failed to attach prog");
997
998 // Immediately trigger an error that should be reported back by consume_raw() or poll_raw()
999 fn callback1(_data: &[u8]) -> i32 {
1000 -libc::ENOENT
1001 }
1002
1003 // Immediately trigger an error that should be reported back by consume_raw() or poll_raw()
1004 fn callback2(_data: &[u8]) -> i32 {
1005 -libc::EPERM
1006 }
1007
1008 // Test trying to build without adding any ringbufs
1009 // Can't use expect_err here since RingBuffer does not implement Debug
1010 let builder = libbpf_rs::RingBufferBuilder::new();
1011 assert!(
1012 builder.build().is_err(),
1013 "Should not be able to build without adding at least one ringbuf"
1014 );
1015
1016 // Test building with multiple map objects
1017 let mut builder = libbpf_rs::RingBufferBuilder::new();
1018
1019 // Add a first map and callback
1020 let map1 = get_map(&obj, "ringbuf1");
1021 builder
1022 .add(&map1, callback1)
1023 .expect("failed to add ringbuf");
1024
1025 // Add a second map and callback
1026 let map2 = get_map(&obj, "ringbuf2");
1027 builder
1028 .add(&map2, callback2)
1029 .expect("failed to add ringbuf");
1030
1031 let mgr = builder.build().expect("failed to build");
1032
1033 // Call getpid to ensure the BPF program runs
1034 unsafe { libc::getpid() };
1035
1036 // Test raw primitives
1037 let ret = mgr.consume_raw();
1038
1039 // The error originating from the first callback executed should be reported here, either
1040 // from callback1() or callback2()
1041 assert!(ret == -libc::ENOENT || ret == -libc::EPERM);
1042
1043 unsafe { libc::getpid() };
1044
1045 // The same behavior should happen with poll_raw()
1046 let ret = mgr.poll_raw(Duration::from_millis(100));
1047
1048 assert!(ret == -libc::ENOENT || ret == -libc::EPERM);
1049 }
1050
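/// Check that ring buffer callbacks fire via consume() and poll().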
1051 #[tag(root)]
1052 #[test]
1053 fn test_object_ringbuf() {
1054 bump_rlimit_mlock();
1055
1056 let mut obj = get_test_object("ringbuf.bpf.o");
1057 let prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
1058 let _link = prog.attach().expect("failed to attach prog");
1059
1060 static V1: AtomicI32 = AtomicI32::new(0);
1061 static V2: AtomicI32 = AtomicI32::new(0);
1062
1063 fn callback1(data: &[u8]) -> i32 {
1064 let mut value: i32 = 0;
1065 plain::copy_from_bytes(&mut value, data).expect("Wrong size");
1066
1067 V1.store(value, Ordering::SeqCst);
1068 0
1069 }
1070
1071 fn callback2(data: &[u8]) -> i32 {
1072 let mut value: i32 = 0;
1073 plain::copy_from_bytes(&mut value, data).expect("Wrong size");
1074
1075 V2.store(value, Ordering::SeqCst);
1076 0
1077 }
1078
1079 // Test trying to build without adding any ringbufs
1080 // Can't use expect_err here since RingBuffer does not implement Debug
1081 let builder = libbpf_rs::RingBufferBuilder::new();
1082 assert!(
1083 builder.build().is_err(),
1084 "Should not be able to build without adding at least one ringbuf"
1085 );
1086
1087 // Test building with multiple map objects
1088 let mut builder = libbpf_rs::RingBufferBuilder::new();
1089
1090 // Add a first map and callback
1091 let map1 = get_map(&obj, "ringbuf1");
1092 builder
1093 .add(&map1, callback1)
1094 .expect("failed to add ringbuf");
1095
1096 // Add a second map and callback
1097 let map2 = get_map(&obj, "ringbuf2");
1098 builder
1099 .add(&map2, callback2)
1100 .expect("failed to add ringbuf");
1101
1102 let mgr = builder.build().expect("failed to build");
1103
1104 // Call getpid to ensure the BPF program runs
1105 unsafe { libc::getpid() };
1106
1107 // This should result in both callbacks being called
1108 mgr.consume().expect("failed to consume ringbuf");
1109
1110 // Our values should both reflect that the callbacks have been called
1111 assert_eq!(V1.load(Ordering::SeqCst), 1);
1112 assert_eq!(V2.load(Ordering::SeqCst), 2);
1113
1114 // Reset both values
1115 V1.store(0, Ordering::SeqCst);
1116 V2.store(0, Ordering::SeqCst);
1117
1118 // Call getpid to ensure the BPF program runs
1119 unsafe { libc::getpid() };
1120
1121 // This should result in both callbacks being called
1122 mgr.poll(Duration::from_millis(100))
1123 .expect("failed to poll ringbuf");
1124
1125 // Our values should both reflect that the callbacks have been called
1126 assert_eq!(V1.load(Ordering::SeqCst), 1);
1127 assert_eq!(V2.load(Ordering::SeqCst), 2);
1128 }
1129
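/// Check that closures can be used as ring buffer callbacks.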
1130 #[tag(root)]
1131 #[test]
1132 fn test_object_ringbuf_closure() {
1133 bump_rlimit_mlock();
1134
1135 let mut obj = get_test_object("ringbuf.bpf.o");
1136 let prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
1137 let _link = prog.attach().expect("failed to attach prog");
1138
1139 let (sender1, receiver1) = channel();
1140 let callback1 = move |data: &[u8]| -> i32 {
1141 let mut value: i32 = 0;
1142 plain::copy_from_bytes(&mut value, data).expect("Wrong size");
1143
1144 sender1.send(value).expect("failed to send value");
1145
1146 0
1147 };
1148
1149 let (sender2, receiver2) = channel();
1150 let callback2 = move |data: &[u8]| -> i32 {
1151 let mut value: i32 = 0;
1152 plain::copy_from_bytes(&mut value, data).expect("Wrong size");
1153
1154 sender2.send(value).expect("failed to send value");
1155
1156 0
1157 };
1158
1159 // Test trying to build without adding any ringbufs
1160 // Can't use expect_err here since RingBuffer does not implement Debug
1161 let builder = libbpf_rs::RingBufferBuilder::new();
1162 assert!(
1163 builder.build().is_err(),
1164 "Should not be able to build without adding at least one ringbuf"
1165 );
1166
1167 // Test building with multiple map objects
1168 let mut builder = libbpf_rs::RingBufferBuilder::new();
1169
1170 // Add a first map and callback
1171 let map1 = get_map(&obj, "ringbuf1");
1172 builder
1173 .add(&map1, callback1)
1174 .expect("failed to add ringbuf");
1175
1176 // Add a second map and callback
1177 let map2 = get_map(&obj, "ringbuf2");
1178 builder
1179 .add(&map2, callback2)
1180 .expect("failed to add ringbuf");
1181
1182 let mgr = builder.build().expect("failed to build");
1183
1184 // Call getpid to ensure the BPF program runs
1185 unsafe { libc::getpid() };
1186
1187 // This should result in both callbacks being called
1188 mgr.consume().expect("failed to consume ringbuf");
1189
1190 let v1 = receiver1.recv().expect("failed to receive value");
1191 let v2 = receiver2.recv().expect("failed to receive value");
1192
1193 assert_eq!(v1, 1);
1194 assert_eq!(v2, 2);
1195 }
1196
1197 /// Check that `RingBuffer` works correctly even if the map file descriptors
1198 /// provided during construction are closed. This test validates that `libbpf`'s
1199 /// refcount behavior is correctly reflected in our `RingBuffer` lifetimes.
1200 #[tag(root)]
1201 #[test]
1202 fn test_object_ringbuf_with_closed_map() {
1203 bump_rlimit_mlock();
1204
1205 fn test(poll_fn: impl FnOnce(&libbpf_rs::RingBuffer)) {
1206 let mut value = 0i32;
1207
1208 {
1209 let mut obj = get_test_object("tracepoint.bpf.o");
1210 let prog = get_prog_mut(&mut obj, "handle__tracepoint");
1211 let _link = prog
1212 .attach_tracepoint("syscalls", "sys_enter_getpid")
1213 .expect("failed to attach prog");
1214
1215 let map = get_map_mut(&mut obj, "ringbuf");
1216
1217 let callback = |data: &[u8]| {
1218 plain::copy_from_bytes(&mut value, data).expect("Wrong size");
1219 0
1220 };
1221
1222 let mut builder = libbpf_rs::RingBufferBuilder::new();
1223 builder.add(&map, callback).expect("failed to add ringbuf");
1224 let ringbuf = builder.build().expect("failed to build");
1225
1226 drop(obj);
1227
1228 // Trigger the tracepoint. At this point `map` along with the containing
1229 // `obj` have been destroyed.
1230 let _pid = unsafe { libc::getpid() };
1231 let () = poll_fn(&ringbuf);
1232 }
1233
1234 // If we see a 1 here the ring buffer was still working as expected.
1235 assert_eq!(value, 1);
1236 }
1237
1238 test(|ringbuf| ringbuf.consume().expect("failed to consume ringbuf"));
1239 test(|ringbuf| {
1240 ringbuf
1241 .poll(Duration::from_secs(5))
1242 .expect("failed to poll ringbuf")
1243 });
1244 }
1245
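/// Check that a BPF program can consume samples submitted through a user ring buffer.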
1246 #[tag(root)]
1247 #[test]
1248 fn test_object_user_ringbuf() {
1249 #[repr(C)]
1250 struct MyStruct {
1251 key: u32,
1252 value: u32,
1253 }
1254
1255 unsafe impl Plain for MyStruct {}
1256
1257 bump_rlimit_mlock();
1258
1259 let mut obj = get_test_object("user_ringbuf.bpf.o");
1260 let prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
1261 let _link = prog.attach().expect("failed to attach prog");
1262 let urb_map = get_map_mut(&mut obj, "user_ringbuf");
1263 let user_ringbuf = UserRingBuffer::new(&urb_map).expect("failed to create user ringbuf");
1264 let mut urb_sample = user_ringbuf
1265 .reserve(size_of::<MyStruct>())
1266 .expect("failed to reserve space");
1267 let bytes = urb_sample.as_mut();
1268 let my_struct = plain::from_mut_bytes::<MyStruct>(bytes).expect("failed to convert bytes");
1269 my_struct.key = 42;
1270 my_struct.value = 1337;
1271 user_ringbuf
1272 .submit(urb_sample)
1273 .expect("failed to submit sample");
1274
1275 // Trigger BPF program.
1276 let _pid = unsafe { libc::getpid() };
1277
1278 // At this point, the BPF program should have run and consumed the sample in
1279 // the user ring buffer, and stored the key/value in the samples map.
1280 let samples_map = get_map_mut(&mut obj, "samples");
1281 let key: u32 = 42;
1282 let value: u32 = 1337;
1283 let res = samples_map
1284 .lookup(&key.to_ne_bytes(), MapFlags::ANY)
1285 .expect("failed to lookup")
1286 .expect("failed to find value for key");
1287
1288 // The value in the samples map should be the same as the value we submitted
1289 assert_eq!(res.len(), size_of::<u32>());
1290 let mut array = [0; size_of::<u32>()];
1291 array.copy_from_slice(&res[..]);
1292 assert_eq!(u32::from_ne_bytes(array), value);
1293 }
1294
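/// Check that over-sized user ring buffer reservations are rejected.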
1295 #[tag(root)]
1296 #[test]
1297 fn test_object_user_ringbuf_reservation_too_big() {
1298 bump_rlimit_mlock();
1299
1300 let mut obj = get_test_object("user_ringbuf.bpf.o");
1301 let prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
1302 let _link = prog.attach().expect("failed to attach prog");
1303 let urb_map = get_map_mut(&mut obj, "user_ringbuf");
1304 let user_ringbuf = UserRingBuffer::new(&urb_map).expect("failed to create user ringbuf");
1305 let err = user_ringbuf.reserve(1024 * 1024).unwrap_err();
1306 assert!(
1307 err.to_string().contains("requested size is too large"),
1308 "{err:#}"
1309 );
1310 }
1311
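/// Check that reservations fail once the user ring buffer is full.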
1312 #[tag(root)]
1313 #[test]
1314 fn test_object_user_ringbuf_not_enough_space() {
1315 bump_rlimit_mlock();
1316
1317 let mut obj = get_test_object("user_ringbuf.bpf.o");
1318 let prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
1319 let _link = prog.attach().expect("failed to attach prog");
1320 let urb_map = get_map_mut(&mut obj, "user_ringbuf");
1321 let user_ringbuf = UserRingBuffer::new(&urb_map).expect("failed to create user ringbuf");
1322 let _ = user_ringbuf
1323 .reserve(1024 * 3)
1324 .expect("failed to reserve space");
1325 let err = user_ringbuf.reserve(1024 * 3).unwrap_err();
1326 assert!(
1327 err.to_string()
1328 .contains("not enough space in the ring buffer"),
1329 "{err:#}"
1330 );
1331 }
1332
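/// Check that a task iterator program reports all tasks, including PID 1.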
1333 #[tag(root)]
1334 #[test]
1335 fn test_object_task_iter() {
1336 bump_rlimit_mlock();
1337
1338 let mut obj = get_test_object("taskiter.bpf.o");
1339 let prog = get_prog_mut(&mut obj, "dump_pid");
1340 let link = prog.attach().expect("failed to attach prog");
1341 let mut iter = Iter::new(&link).expect("failed to create iterator");
1342
1343 #[repr(C)]
1344 #[derive(Clone, Copy)]
1345 struct IndexPidPair {
1346 i: u32,
1347 pid: i32,
1348 }
1349
1350 unsafe impl Plain for IndexPidPair {}
1351
1352 let mut buf = Vec::new();
1353 let bytes_read = iter
1354 .read_to_end(&mut buf)
1355 .expect("failed to read from iterator");
1356
1357 assert!(bytes_read > 0);
1358 assert_eq!(bytes_read % size_of::<IndexPidPair>(), 0);
1359 let items: &[IndexPidPair] =
1360 plain::slice_from_bytes(buf.as_slice()).expect("Input slice cannot satisfy length");
1361
1362 assert!(!items.is_empty());
1363 assert_eq!(items[0].i, 0);
1364 assert!(items.windows(2).all(|w| w[0].i + 1 == w[1].i));
1365 // Check for init
1366 assert!(items.iter().any(|&item| item.pid == 1));
1367 }
1368
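/// Check that a map iterator program sees all inserted keys.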
1369 #[tag(root)]
1370 #[test]
1371 fn test_object_map_iter() {
1372 bump_rlimit_mlock();
1373
1374 // Create a map for iteration test.
1375 let opts = libbpf_sys::bpf_map_create_opts {
1376 sz: size_of::<libbpf_sys::bpf_map_create_opts>() as libbpf_sys::size_t,
1377 map_flags: libbpf_sys::BPF_F_NO_PREALLOC,
1378 ..Default::default()
1379 };
1380 let map = MapHandle::create(
1381 MapType::Hash,
1382 Some("mymap_test_object_map_iter"),
1383 4,
1384 8,
1385 8,
1386 &opts,
1387 )
1388 .expect("failed to create map");
1389
1390 // Insert 3 elements.
1391 for i in 0..3 {
1392 let key = i32::to_ne_bytes(i);
1393 // The 8-byte value is the 4-byte key padded with zeroes; a larger range of `i` would make the test more robust.
1394 let val = [&key[..], &[0_u8; 4]].concat();
1395 map.update(&key, val.as_slice(), MapFlags::empty())
1396 .expect("failed to write");
1397 }
1398
1399 let mut obj = get_test_object("mapiter.bpf.o");
1400 let prog = get_prog_mut(&mut obj, "map_iter");
1401 let link = prog
1402 .attach_iter(map.as_fd())
1403 .expect("failed to attach map iter prog");
1404 let mut iter = Iter::new(&link).expect("failed to create map iterator");
1405
1406 let mut buf = Vec::new();
1407 let bytes_read = iter
1408 .read_to_end(&mut buf)
1409 .expect("failed to read from iterator");
1410
1411 assert!(bytes_read > 0);
1412 assert_eq!(bytes_read % size_of::<u32>(), 0);
1413 // Convert buf to &[u32]
1414 let buf =
1415 plain::slice_from_bytes::<u32>(buf.as_slice()).expect("Input slice cannot satisfy length");
1416 assert!(buf.contains(&0));
1417 assert!(buf.contains(&1));
1418 assert!(buf.contains(&2));
1419 }
1420
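/// Check that a map created at runtime can be pinned and unpinned.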
1421 #[tag(root)]
1422 #[test]
1423 fn test_object_map_create_and_pin() {
1424 bump_rlimit_mlock();
1425
1426 let opts = libbpf_sys::bpf_map_create_opts {
1427 sz: size_of::<libbpf_sys::bpf_map_create_opts>() as libbpf_sys::size_t,
1428 map_flags: libbpf_sys::BPF_F_NO_PREALLOC,
1429 ..Default::default()
1430 };
1431
1432 let mut map = MapHandle::create(
1433 MapType::Hash,
1434 Some("mymap_test_object_map_create_and_pin"),
1435 4,
1436 8,
1437 8,
1438 &opts,
1439 )
1440 .expect("failed to create map");
1441
1442 assert_eq!(map.name(), "mymap_test_object_map_create_and_pin");
1443
1444 let key = vec![1, 2, 3, 4];
1445 let val = vec![1, 2, 3, 4, 5, 6, 7, 8];
1446 map.update(&key, &val, MapFlags::empty())
1447 .expect("failed to write");
1448 let res = map
1449 .lookup(&key, MapFlags::ANY)
1450 .expect("failed to lookup")
1451 .expect("failed to find value for key");
1452 assert_eq!(val, res);
1453
1454 let path = "/sys/fs/bpf/mymap_test_object_map_create_and_pin";
1455
1456 // Unpinning an unpinned map should be an error
1457 assert!(map.unpin(path).is_err());
1458 assert!(!Path::new(path).exists());
1459
1460 // Pin and unpin should be successful
1461 map.pin(path).expect("failed to pin map");
1462 assert!(Path::new(path).exists());
1463 map.unpin(path).expect("failed to unpin map");
1464 assert!(!Path::new(path).exists());
1465 }
1466
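/// Check that a map can be created without a name.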
1467 #[tag(root)]
1468 #[test]
1469 fn test_object_map_create_without_name() {
1470 bump_rlimit_mlock();
1471
1472 #[allow(clippy::needless_update)]
1473 let opts = libbpf_sys::bpf_map_create_opts {
1474 sz: size_of::<libbpf_sys::bpf_map_create_opts>() as libbpf_sys::size_t,
1475 map_flags: libbpf_sys::BPF_F_NO_PREALLOC,
1476 btf_fd: 0,
1477 btf_key_type_id: 0,
1478 btf_value_type_id: 0,
1479 btf_vmlinux_value_type_id: 0,
1480 inner_map_fd: 0,
1481 map_extra: 0,
1482 numa_node: 0,
1483 map_ifindex: 0,
1484 // bpf_map_create_opts might have padding fields on some platforms
1485 ..Default::default()
1486 };
1487
1488 let map = MapHandle::create(MapType::Hash, Option::<&str>::None, 4, 8, 8, &opts)
1489 .expect("failed to create map");
1490
1491 assert!(map.name().is_empty());
1492
1493 let key = vec![1, 2, 3, 4];
1494 let val = vec![1, 2, 3, 4, 5, 6, 7, 8];
1495 map.update(&key, &val, MapFlags::empty())
1496 .expect("failed to write");
1497 let res = map
1498 .lookup(&key, MapFlags::ANY)
1499 .expect("failed to lookup")
1500 .expect("failed to find value for key");
1501 assert_eq!(val, res);
1502 }
1503
1504 /// Test whether we can obtain multiple `MapHandle`s from a `Map`.
1505 #[tag(root)]
1506 #[test]
1507 fn test_object_map_handle_clone() {
1508 bump_rlimit_mlock();
1509
1510 let mut obj = get_test_object("runqslower.bpf.o");
1511 let map = get_map_mut(&mut obj, "events");
1512 let handle1 = MapHandle::try_from(&map).expect("failed to create handle from Map");
1513 assert_eq!(map.name(), handle1.name());
1514 assert_eq!(map.map_type(), handle1.map_type());
1515 assert_eq!(map.key_size(), handle1.key_size());
1516 assert_eq!(map.value_size(), handle1.value_size());
1517
1518 let handle2 = MapHandle::try_from(&handle1).expect("failed to duplicate existing handle");
1519 assert_eq!(handle1.name(), handle2.name());
1520 assert_eq!(handle1.map_type(), handle2.map_type());
1521 assert_eq!(handle1.key_size(), handle2.key_size());
1522 assert_eq!(handle1.value_size(), handle2.value_size());
1523
1524 let info1 = map.info().expect("failed to get map info from map");
1525 let info2 = handle2.info().expect("failed to get map info from handle");
1526 assert_eq!(
1527 info1.info.id, info2.info.id,
1528 "Map and MapHandle have different IDs"
1529 );
1530 }
1531
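/// Check that we can attach a BPF program to a USDT probe in our own binary.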
1532 #[tag(root)]
1533 #[test]
1534 fn test_object_usdt() {
1535 bump_rlimit_mlock();
1536
1537 let mut obj = get_test_object("usdt.bpf.o");
1538 let prog = get_prog_mut(&mut obj, "handle__usdt");
1539
1540 let path = current_exe().expect("failed to find executable name");
1541 let _link = prog
1542 .attach_usdt(
1543 unsafe { libc::getpid() },
1544 &path,
1545 "test_provider",
1546 "test_function",
1547 )
1548 .expect("failed to attach prog");
1549
1550 let map = get_map_mut(&mut obj, "ringbuf");
1551 let action = || {
1552 // Define a USDT probe point and exercise it as we are attaching to self.
1553 probe!(test_provider, test_function, 1);
1554 };
1555 let result = with_ringbuffer(&map, action);
1556
1557 assert_eq!(result, 1);
1558 }
1559
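/// Check that the USDT cookie is passed through to the program.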
1560 #[tag(root)]
1561 #[test]
1562 fn test_object_usdt_cookie() {
1563 bump_rlimit_mlock();
1564
1565 let cookie_val = 1337u16;
1566 let mut obj = get_test_object("usdt.bpf.o");
1567 let prog = get_prog_mut(&mut obj, "handle__usdt_with_cookie");
1568
1569 let path = current_exe().expect("failed to find executable name");
1570 let _link = prog
1571 .attach_usdt_with_opts(
1572 unsafe { libc::getpid() },
1573 &path,
1574 "test_provider",
1575 "test_function2",
1576 UsdtOpts {
1577 cookie: cookie_val.into(),
1578 ..UsdtOpts::default()
1579 },
1580 )
1581 .expect("failed to attach prog");
1582
1583 let map = get_map_mut(&mut obj, "ringbuf");
1584 let action = || {
1585 // Define a USDT probe point and exercise it as we are attaching to self.
1586 probe!(test_provider, test_function2, 1);
1587 };
1588 let result = with_ringbuffer(&map, action);
1589
1590 assert_eq!(result, cookie_val.into());
1591 }
1592
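/// Check that map type support can be queried.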
1593 #[tag(root)]
1594 #[test]
1595 fn test_map_probes() {
1596 bump_rlimit_mlock();
1597
1598 let supported = MapType::Array
1599 .is_supported()
1600 .expect("failed to query if Array map is supported");
1601 assert!(supported);
1602 let supported_res = MapType::Unknown.is_supported();
1603 assert!(supported_res.is_err());
1604 }
1605
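/// Check that program type support can be queried.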
1606 #[tag(root)]
1607 #[test]
1608 fn test_program_probes() {
1609 bump_rlimit_mlock();
1610
1611 let supported = ProgramType::SocketFilter
1612 .is_supported()
1613 .expect("failed to query if SocketFilter program is supported");
1614 assert!(supported);
1615 let supported_res = ProgramType::Unknown.is_supported();
1616 assert!(supported_res.is_err());
1617 }
1618
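/// Check that helper support can be queried per program type.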
1619 #[tag(root)]
1620 #[test]
1621 fn test_program_helper_probes() {
1622 bump_rlimit_mlock();
1623
1624 let supported = ProgramType::SocketFilter
1625 .is_helper_supported(libbpf_sys::BPF_FUNC_map_lookup_elem)
1626 .expect("failed to query if helper supported");
1627 assert!(supported);
1628 // redirect should not be supported from socket filter, as it is only used in TC/XDP.
1629 let supported = ProgramType::SocketFilter
1630 .is_helper_supported(libbpf_sys::BPF_FUNC_redirect)
1631 .expect("failed to query if helper supported");
1632 assert!(!supported);
1633 let supported_res = MapType::Unknown.is_supported();
1634 assert!(supported_res.is_err());
1635 }
1636
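/// Check that we can inspect the instructions of an open (not yet loaded) program.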
1637 #[tag(root)]
1638 #[test]
1639 fn test_object_open_program_insns() {
1640 bump_rlimit_mlock();
1641
1642 let open_obj = open_test_object("usdt.bpf.o");
1643 let prog = open_obj
1644 .progs()
1645 .find(|prog| prog.name() == OsStr::new("handle__usdt"))
1646 .expect("failed to find program");
1647
1648 let insns = prog.insns();
1649 assert!(!insns.is_empty());
1650 }
1651
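/// Check that instructions are also accessible on a loaded program.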
1652 #[tag(root)]
1653 #[test]
1654 fn test_object_program_insns() {
1655 bump_rlimit_mlock();
1656
1657 let mut obj = get_test_object("usdt.bpf.o");
1658 let prog = get_prog_mut(&mut obj, "handle__usdt");
1659 let insns = prog.insns();
1660 assert!(!insns.is_empty());
1661 }
1662
1663 /// Check that we can attach a BPF program to a kernel tracepoint.
1664 #[tag(root)]
1665 #[test]
1666 fn test_object_tracepoint() {
1667 bump_rlimit_mlock();
1668
1669 let mut obj = get_test_object("tracepoint.bpf.o");
1670 let prog = get_prog_mut(&mut obj, "handle__tracepoint");
1671 let _link = prog
1672 .attach_tracepoint("syscalls", "sys_enter_getpid")
1673 .expect("failed to attach prog");
1674
1675 let map = get_map_mut(&mut obj, "ringbuf");
1676 let action = || {
1677 let _pid = unsafe { libc::getpid() };
1678 };
1679 let result = with_ringbuffer(&map, action);
1680
1681 assert_eq!(result, 1);
1682 }
1683
1684 /// Check that we can attach a BPF program to a kernel tracepoint, providing
1685 /// additional options.
1686 #[tag(root)]
1687 #[test]
1688 fn test_object_tracepoint_with_opts() {
1689 bump_rlimit_mlock();
1690
1691 let cookie_val = 42u16;
1692 let mut obj = get_test_object("tracepoint.bpf.o");
1693 let prog = get_prog_mut(&mut obj, "handle__tracepoint_with_cookie");
1694
1695 let opts = TracepointOpts {
1696 cookie: cookie_val.into(),
1697 ..TracepointOpts::default()
1698 };
1699 let _link = prog
1700 .attach_tracepoint_with_opts("syscalls", "sys_enter_getpid", opts)
1701 .expect("failed to attach prog");
1702
1703 let map = get_map_mut(&mut obj, "ringbuf");
1704 let action = || {
1705 let _pid = unsafe { libc::getpid() };
1706 };
1707 let result = with_ringbuffer(&map, action);
1708
1709 assert_eq!(result, cookie_val.into());
1710 }
1711
1712 #[inline(never)]
1713 #[no_mangle]
1714 extern "C" fn uprobe_target() -> usize {
1715 // Use `black_box` here as an additional barrier to inlining.
1716 hint::black_box(42)
1717 }
1718
1719 /// Check that we can attach a BPF program to a uprobe.
1720 #[tag(root)]
1721 #[test]
1722 fn test_object_uprobe_with_opts() {
1723 bump_rlimit_mlock();
1724
1725 let mut obj = get_test_object("uprobe.bpf.o");
1726 let prog = get_prog_mut(&mut obj, "handle__uprobe");
1727
1728 let pid = unsafe { libc::getpid() };
1729 let path = current_exe().expect("failed to find executable name");
1730 let func_offset = 0;
1731 let opts = UprobeOpts {
1732 func_name: "uprobe_target".to_string(),
1733 ..Default::default()
1734 };
1735 let _link = prog
1736 .attach_uprobe_with_opts(pid, path, func_offset, opts)
1737 .expect("failed to attach prog");
1738
1739 let map = get_map_mut(&mut obj, "ringbuf");
1740 let action = || {
1741 let _ = uprobe_target();
1742 };
1743 let result = with_ringbuffer(&map, action);
1744
1745 assert_eq!(result, 1);
1746 }
1747
1748 /// Check that we can attach a BPF program to a uprobe and access the cookie
1749 /// provided during attach.
1750 #[tag(root)]
1751 #[test]
1752 fn test_object_uprobe_with_cookie() {
1753 bump_rlimit_mlock();
1754
1755 let cookie_val = 5u16;
1756 let mut obj = get_test_object("uprobe.bpf.o");
1757 let prog = get_prog_mut(&mut obj, "handle__uprobe_with_cookie");
1758
1759 let pid = unsafe { libc::getpid() };
1760 let path = current_exe().expect("failed to find executable name");
1761 let func_offset = 0;
1762 let opts = UprobeOpts {
1763 func_name: "uprobe_target".to_string(),
1764 cookie: cookie_val.into(),
1765 ..Default::default()
1766 };
1767 let _link = prog
1768 .attach_uprobe_with_opts(pid, path, func_offset, opts)
1769 .expect("failed to attach prog");
1770
1771 let map = get_map_mut(&mut obj, "ringbuf");
1772 let action = || {
1773 let _ = uprobe_target();
1774 };
1775 let result = with_ringbuffer(&map, action);
1776
1777 assert_eq!(result, cookie_val.into());
1778 }
1779
1780 /// Check that we can link multiple object files.
1781 #[test]
1782 fn test_object_link_files() {
1783 fn test(files: Vec<PathBuf>) {
1784 let output_file = NamedTempFile::new().unwrap();
1785
1786 let mut linker = Linker::new(output_file.path()).unwrap();
1787 let () = files
1788 .into_iter()
1789 .try_for_each(|file| linker.add_file(file))
1790 .unwrap();
1791 let () = linker.link().unwrap();
1792
1793 // Check that we can load the resulting object file.
1794 let _object = ObjectBuilder::default()
1795 .debug(true)
1796 .open_file(output_file.path())
1797 .unwrap();
1798 }
1799
1800 let obj_path1 = get_test_object_path("usdt.bpf.o");
1801 let obj_path2 = get_test_object_path("ringbuf.bpf.o");
1802
1803 test(vec![obj_path1.clone()]);
1804 test(vec![obj_path1, obj_path2]);
1805 }
1806
1807 /// Get access to the underlying per-cpu ring buffer data.
buffer<'a>(perf: &'a libbpf_rs::PerfBuffer, buf_idx: usize) -> &'a [u8]1808 fn buffer<'a>(perf: &'a libbpf_rs::PerfBuffer, buf_idx: usize) -> &'a [u8] {
1809 let perf_buff_ptr = perf.as_libbpf_object();
1810 let mut buffer_data_ptr: *mut c_void = ptr::null_mut();
1811 let mut buffer_size: usize = 0;
1812 let ret = unsafe {
1813 libbpf_sys::perf_buffer__buffer(
1814 perf_buff_ptr.as_ptr(),
1815 buf_idx as i32,
1816 ptr::addr_of_mut!(buffer_data_ptr),
1817 ptr::addr_of_mut!(buffer_size) as *mut libbpf_sys::size_t,
1818 )
1819 };
1820 assert!(ret >= 0);
1821 unsafe { slice::from_raw_parts(buffer_data_ptr as *const u8, buffer_size) }
1822 }
1823
1824 /// Check that we can see the raw ring buffer of the perf buffer and find a
1825 /// value we have sent.
1826 #[tag(root)]
1827 #[test]
1828 fn test_object_perf_buffer_raw() {
1829 use memmem::Searcher;
1830 use memmem::TwoWaySearcher;
1831
1832 bump_rlimit_mlock();
1833
1834 let cookie_val = 42u16;
1835 let mut obj = get_test_object("tracepoint.bpf.o");
1836 let prog = get_prog_mut(&mut obj, "handle__tracepoint_with_cookie_pb");
1837
1838 let opts = TracepointOpts {
1839 cookie: cookie_val.into(),
1840 ..TracepointOpts::default()
1841 };
1842 let _link = prog
1843 .attach_tracepoint_with_opts("syscalls", "sys_enter_getpid", opts)
1844 .expect("failed to attach prog");
1845
1846 let map = get_map_mut(&mut obj, "pb");
1847 let cookie_bytes = cookie_val.to_ne_bytes();
1848 let searcher = TwoWaySearcher::new(&cookie_bytes[..]);
1849
1850 let perf = libbpf_rs::PerfBufferBuilder::new(&map)
1851 .build()
1852 .expect("failed to build");
1853
1854 // Perform an action that the tracepoint will see
1855 let _pid = unsafe { libc::getpid() };
1856
1857 let found_cookie = (0..perf.buffer_cnt()).any(|buf_idx| {
1858 let buf = buffer(&perf, buf_idx);
1859 searcher.search_in(buf).is_some()
1860 });
1861
1862 assert!(found_cookie);
1863 }
1864
1865 /// Check that we can get map pin status and map pin path
1866 #[tag(root)]
1867 #[test]
1868 fn test_map_pinned_status() {
1869 bump_rlimit_mlock();
1870
1871 let mut obj = get_test_object("map_auto_pin.bpf.o");
1872 let map = get_map_mut(&mut obj, "auto_pin_map");
1873 let is_pinned = map.is_pinned();
1874 assert!(is_pinned);
1875 let expected_path = "/sys/fs/bpf/auto_pin_map";
1876 let get_path = map.get_pin_path().expect("get map pin path failed");
1877 assert_eq!(expected_path, get_path.to_str().unwrap());
1878 // cleanup
1879 let _ = fs::remove_file(expected_path);
1880 }
1881
1882 /// Change the pin_root_path and see if it works.
1883 #[tag(root)]
1884 #[test]
1885 fn test_map_pinned_status_with_pin_root_path() {
1886 bump_rlimit_mlock();
1887
1888 let obj_path = get_test_object_path("map_auto_pin.bpf.o");
1889 let mut obj = ObjectBuilder::default()
1890 .debug(true)
1891 .pin_root_path("/sys/fs/bpf/test_namespace")
1892 .expect("root_pin_path failed")
1893 .open_file(obj_path)
1894 .expect("failed to open object")
1895 .load()
1896 .expect("failed to load object");
1897
1898 let map = get_map_mut(&mut obj, "auto_pin_map");
1899 let is_pinned = map.is_pinned();
1900 assert!(is_pinned);
1901 let expected_path = "/sys/fs/bpf/test_namespace/auto_pin_map";
1902 let get_path = map.get_pin_path().expect("get map pin path failed");
1903 assert_eq!(expected_path, get_path.to_str().unwrap());
1904 // cleanup
1905 let _ = fs::remove_file(expected_path);
1906 let _ = fs::remove_dir("/sys/fs/bpf/test_namespace");
1907 }
1908
1909 /// Check that we can get program fd by id and vice versa.
1910 #[tag(root)]
1911 #[test]
1912 fn test_program_get_fd_and_id() {
1913 bump_rlimit_mlock();
1914
1915 let mut obj = get_test_object("runqslower.bpf.o");
1916 let prog = get_prog_mut(&mut obj, "handle__sched_wakeup");
1917 let prog_fd = prog.as_fd();
1918 let prog_id = Program::id_from_fd(prog_fd).expect("failed to get program id from fd");
1919 let _owned_prog_fd = Program::fd_from_id(prog_id).expect("failed to get program fd from id");
1920 }
1921
1922 /// Check that autocreate-disabled maps don't prevent object loading.
1923 #[tag(root)]
1924 #[test]
1925 fn test_map_autocreate_disable() {
1926 bump_rlimit_mlock();
1927
1928 let mut open_obj = open_test_object("map_auto_pin.bpf.o");
1929 let mut auto_pin_map = open_obj
1930 .maps_mut()
1931 .find(|map| map.name() == OsStr::new("auto_pin_map"))
1932 .expect("failed to find `auto_pin_map` map");
1933 auto_pin_map
1934 .set_autocreate(false)
1935 .expect("set_autocreate() failed");
1936
1937 open_obj.load().expect("failed to load object");
1938 }
1939
1940 /// Check that we can resize a map.
1941 #[tag(root)]
1942 #[test]
1943 fn test_map_resize() {
1944 bump_rlimit_mlock();
1945
1946 let mut open_obj = open_test_object("map_auto_pin.bpf.o");
1947 let mut resizable = open_obj
1948 .maps_mut()
1949 .find(|map| map.name() == OsStr::new(".data.resizable_data"))
1950 .expect("failed to find `.data.resizable_data` map");
1951
1952 let len = resizable.initial_value().unwrap().len();
1953 assert_eq!(len, size_of::<u64>());
1954
1955 let () = resizable
1956 .set_value_size(len as u32 * 2)
1957 .expect("failed to set value size");
1958 let new_len = resizable.initial_value().unwrap().len();
1959 assert_eq!(new_len, len * 2);
1960 }
1961
1962 /// Check that we are able to attach using ksyscall
1963 #[tag(root)]
1964 #[test]
1965 fn test_attach_ksyscall() {
1966 bump_rlimit_mlock();
1967
1968 let mut obj = get_test_object("ksyscall.bpf.o");
1969 let prog = get_prog_mut(&mut obj, "handle__ksyscall");
1970 let _link = prog
1971 .attach_ksyscall(false, "kill")
1972 .expect("failed to attach prog");
1973
1974 let map = get_map_mut(&mut obj, "ringbuf");
1975 let action = || {
1976 // Send `SIGCHLD`, which is ignored by default, to our process.
1977 let ret = unsafe { libc::kill(libc::getpid(), libc::SIGCHLD) };
1978 if ret < 0 {
1979 panic!("kill failed: {}", io::Error::last_os_error());
1980 }
1981 };
1982 let result = with_ringbuffer(&map, action);
1983
1984 assert_eq!(result, 1);
1985 }
1986
1987 /// Check that we can invoke a program directly.
1988 #[tag(root)]
1989 #[test]
1990 fn test_run_prog_success() {
1991 bump_rlimit_mlock();
1992
1993 let mut obj = get_test_object("run_prog.bpf.o");
1994 let prog = get_prog_mut(&mut obj, "test_1");
1995
1996 #[repr(C)]
1997 struct bpf_dummy_ops_state {
1998 val: c_int,
1999 }
2000
2001 let value = 42;
2002 let state = bpf_dummy_ops_state { val: value };
2003 let mut args = [addr_of!(state) as u64];
2004 let input = ProgramInput {
2005 context_in: Some(unsafe {
2006 slice::from_raw_parts_mut(&mut args as *mut _ as *mut u8, size_of_val(&args))
2007 }),
2008 ..Default::default()
2009 };
2010 let output = prog.test_run(input).unwrap();
2011 assert_eq!(output.return_value, value as _);
2012 }
2013
2014 /// Check that we fail program invocation when providing insufficient arguments.
2015 #[tag(root)]
2016 #[test]
2017 fn test_run_prog_fail() {
2018 bump_rlimit_mlock();
2019
2020 let mut obj = get_test_object("run_prog.bpf.o");
2021 let prog = get_prog_mut(&mut obj, "test_2");
2022
2023 let input = ProgramInput::default();
2024 let _err = prog.test_run(input).unwrap_err();
2025 }
2026