1 use core::ffi::c_void;
2 use std::ffi::CStr;
3 use std::ffi::CString;
4 use std::ffi::OsStr;
5 use std::ffi::OsString;
6 use std::fmt::Debug;
7 use std::fs::remove_file;
8 use std::io;
9 use std::marker::PhantomData;
10 use std::mem;
11 use std::mem::transmute;
12 use std::ops::Deref;
13 use std::os::unix::ffi::OsStrExt;
14 use std::os::unix::io::AsFd;
15 use std::os::unix::io::AsRawFd;
16 use std::os::unix::io::BorrowedFd;
17 use std::os::unix::io::FromRawFd;
18 use std::os::unix::io::OwnedFd;
19 use std::os::unix::io::RawFd;
20 use std::path::Path;
21 use std::ptr;
22 use std::ptr::NonNull;
23 use std::slice;
24 use std::slice::from_raw_parts;
25
26 use bitflags::bitflags;
27 use libbpf_sys::bpf_map_info;
28 use libbpf_sys::bpf_obj_get_info_by_fd;
29
30 use crate::util;
31 use crate::util::parse_ret_i32;
32 use crate::util::validate_bpf_ret;
33 use crate::AsRawLibbpf;
34 use crate::Error;
35 use crate::ErrorExt as _;
36 use crate::Link;
37 use crate::Mut;
38 use crate::Result;
39
/// An immutable parsed but not yet loaded BPF map.
///
/// Alias for [`OpenMapImpl`] with the default (shared-access) marker.
pub type OpenMap<'obj> = OpenMapImpl<'obj>;
/// A mutable parsed but not yet loaded BPF map.
///
/// Alias for [`OpenMapImpl`] with the exclusive-access marker [`Mut`].
pub type OpenMapMut<'obj> = OpenMapImpl<'obj, Mut>;
44
45
/// Represents a parsed but not yet loaded BPF map.
///
/// This object exposes operations that need to happen before the map is created.
///
/// Some methods require working with raw bytes. You may find libraries such as
/// [`plain`](https://crates.io/crates/plain) helpful.
#[derive(Debug)]
#[repr(transparent)]
pub struct OpenMapImpl<'obj, T = ()> {
    // Pointer to the underlying `libbpf_sys::bpf_map`, owned by the
    // enclosing BPF object and valid for the `'obj` lifetime.
    ptr: NonNull<libbpf_sys::bpf_map>,
    // Carries the `'obj` borrow and the mutability marker `T` (`()` for
    // shared access, `Mut` for exclusive access). `repr(transparent)` is
    // relied upon by the `Deref` impl below that transmutes between the
    // two instantiations.
    _phantom: PhantomData<&'obj T>,
}
58
59 // TODO: Document members.
60 #[allow(missing_docs)]
61 impl<'obj> OpenMap<'obj> {
62 /// Create a new [`OpenMap`] from a ptr to a `libbpf_sys::bpf_map`.
new(object: &'obj libbpf_sys::bpf_map) -> Self63 pub fn new(object: &'obj libbpf_sys::bpf_map) -> Self {
64 // SAFETY: We inferred the address from a reference, which is always
65 // valid.
66 Self {
67 ptr: unsafe { NonNull::new_unchecked(object as *const _ as *mut _) },
68 _phantom: PhantomData,
69 }
70 }
71
72 /// Retrieve the [`OpenMap`]'s name.
name(&self) -> &OsStr73 pub fn name(&self) -> &OsStr {
74 // SAFETY: We ensured `ptr` is valid during construction.
75 let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
76 // SAFETY: `bpf_map__name` can return NULL but only if it's passed
77 // NULL. We know `ptr` is not NULL.
78 let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
79 OsStr::from_bytes(name_c_str.to_bytes())
80 }
81
82 /// Retrieve type of the map.
map_type(&self) -> MapType83 pub fn map_type(&self) -> MapType {
84 let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
85 MapType::from(ty)
86 }
87
initial_value_raw(&self) -> (*mut u8, usize)88 fn initial_value_raw(&self) -> (*mut u8, usize) {
89 let mut size = 0u64;
90 let ptr = unsafe {
91 libbpf_sys::bpf_map__initial_value(self.ptr.as_ptr(), &mut size as *mut _ as _)
92 };
93 (ptr.cast(), size as _)
94 }
95
96 /// Retrieve the initial value of the map.
initial_value(&self) -> Option<&[u8]>97 pub fn initial_value(&self) -> Option<&[u8]> {
98 let (ptr, size) = self.initial_value_raw();
99 if ptr.is_null() {
100 None
101 } else {
102 let data = unsafe { slice::from_raw_parts(ptr.cast::<u8>(), size) };
103 Some(data)
104 }
105 }
106 }
107
108 impl<'obj> OpenMapMut<'obj> {
109 /// Create a new [`OpenMapMut`] from a ptr to a `libbpf_sys::bpf_map`.
new_mut(object: &'obj mut libbpf_sys::bpf_map) -> Self110 pub fn new_mut(object: &'obj mut libbpf_sys::bpf_map) -> Self {
111 Self {
112 ptr: unsafe { NonNull::new_unchecked(object as *mut _) },
113 _phantom: PhantomData,
114 }
115 }
116
117 /// Retrieve the initial value of the map.
initial_value_mut(&mut self) -> Option<&mut [u8]>118 pub fn initial_value_mut(&mut self) -> Option<&mut [u8]> {
119 let (ptr, size) = self.initial_value_raw();
120 if ptr.is_null() {
121 None
122 } else {
123 let data = unsafe { slice::from_raw_parts_mut(ptr.cast::<u8>(), size) };
124 Some(data)
125 }
126 }
127
128 /// Bind map to a particular network device.
129 ///
130 /// Used for offloading maps to hardware.
set_map_ifindex(&mut self, idx: u32)131 pub fn set_map_ifindex(&mut self, idx: u32) {
132 unsafe { libbpf_sys::bpf_map__set_ifindex(self.ptr.as_ptr(), idx) };
133 }
134
135 /// Set the initial value of the map.
set_initial_value(&mut self, data: &[u8]) -> Result<()>136 pub fn set_initial_value(&mut self, data: &[u8]) -> Result<()> {
137 let ret = unsafe {
138 libbpf_sys::bpf_map__set_initial_value(
139 self.ptr.as_ptr(),
140 data.as_ptr() as *const c_void,
141 data.len() as libbpf_sys::size_t,
142 )
143 };
144
145 util::parse_ret(ret)
146 }
147
148 /// Set the type of the map.
set_type(&mut self, ty: MapType) -> Result<()>149 pub fn set_type(&mut self, ty: MapType) -> Result<()> {
150 let ret = unsafe { libbpf_sys::bpf_map__set_type(self.ptr.as_ptr(), ty as u32) };
151 util::parse_ret(ret)
152 }
153
154 /// Set the key size of the map in bytes.
set_key_size(&mut self, size: u32) -> Result<()>155 pub fn set_key_size(&mut self, size: u32) -> Result<()> {
156 let ret = unsafe { libbpf_sys::bpf_map__set_key_size(self.ptr.as_ptr(), size) };
157 util::parse_ret(ret)
158 }
159
160 /// Set the value size of the map in bytes.
set_value_size(&mut self, size: u32) -> Result<()>161 pub fn set_value_size(&mut self, size: u32) -> Result<()> {
162 let ret = unsafe { libbpf_sys::bpf_map__set_value_size(self.ptr.as_ptr(), size) };
163 util::parse_ret(ret)
164 }
165
166 /// Set the maximum number of entries this map can have.
set_max_entries(&mut self, count: u32) -> Result<()>167 pub fn set_max_entries(&mut self, count: u32) -> Result<()> {
168 let ret = unsafe { libbpf_sys::bpf_map__set_max_entries(self.ptr.as_ptr(), count) };
169 util::parse_ret(ret)
170 }
171
172 /// Set flags on this map.
set_map_flags(&mut self, flags: u32) -> Result<()>173 pub fn set_map_flags(&mut self, flags: u32) -> Result<()> {
174 let ret = unsafe { libbpf_sys::bpf_map__set_map_flags(self.ptr.as_ptr(), flags) };
175 util::parse_ret(ret)
176 }
177
178 // TODO: Document member.
179 #[allow(missing_docs)]
set_numa_node(&mut self, numa_node: u32) -> Result<()>180 pub fn set_numa_node(&mut self, numa_node: u32) -> Result<()> {
181 let ret = unsafe { libbpf_sys::bpf_map__set_numa_node(self.ptr.as_ptr(), numa_node) };
182 util::parse_ret(ret)
183 }
184
185 // TODO: Document member.
186 #[allow(missing_docs)]
set_inner_map_fd(&mut self, inner_map_fd: BorrowedFd<'_>) -> Result<()>187 pub fn set_inner_map_fd(&mut self, inner_map_fd: BorrowedFd<'_>) -> Result<()> {
188 let ret = unsafe {
189 libbpf_sys::bpf_map__set_inner_map_fd(self.ptr.as_ptr(), inner_map_fd.as_raw_fd())
190 };
191 util::parse_ret(ret)
192 }
193
194 // TODO: Document member.
195 #[allow(missing_docs)]
set_map_extra(&mut self, map_extra: u64) -> Result<()>196 pub fn set_map_extra(&mut self, map_extra: u64) -> Result<()> {
197 let ret = unsafe { libbpf_sys::bpf_map__set_map_extra(self.ptr.as_ptr(), map_extra) };
198 util::parse_ret(ret)
199 }
200
201 /// Set whether or not libbpf should automatically create this map during load phase.
set_autocreate(&mut self, autocreate: bool) -> Result<()>202 pub fn set_autocreate(&mut self, autocreate: bool) -> Result<()> {
203 let ret = unsafe { libbpf_sys::bpf_map__set_autocreate(self.ptr.as_ptr(), autocreate) };
204 util::parse_ret(ret)
205 }
206
207 /// Set where the map should be pinned.
208 ///
209 /// Note this does not actually create the pin.
set_pin_path<P: AsRef<Path>>(&mut self, path: P) -> Result<()>210 pub fn set_pin_path<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
211 let path_c = util::path_to_cstring(path)?;
212 let path_ptr = path_c.as_ptr();
213
214 let ret = unsafe { libbpf_sys::bpf_map__set_pin_path(self.ptr.as_ptr(), path_ptr) };
215 util::parse_ret(ret)
216 }
217
218 /// Reuse an fd for a BPF map
reuse_fd(&mut self, fd: BorrowedFd<'_>) -> Result<()>219 pub fn reuse_fd(&mut self, fd: BorrowedFd<'_>) -> Result<()> {
220 let ret = unsafe { libbpf_sys::bpf_map__reuse_fd(self.ptr.as_ptr(), fd.as_raw_fd()) };
221 util::parse_ret(ret)
222 }
223
224 /// Reuse an already-pinned map for `self`.
reuse_pinned_map<P: AsRef<Path>>(&mut self, path: P) -> Result<()>225 pub fn reuse_pinned_map<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
226 let cstring = util::path_to_cstring(path)?;
227
228 let fd = unsafe { libbpf_sys::bpf_obj_get(cstring.as_ptr()) };
229 if fd < 0 {
230 return Err(Error::from(io::Error::last_os_error()));
231 }
232
233 let fd = unsafe { OwnedFd::from_raw_fd(fd) };
234
235 let reuse_result = self.reuse_fd(fd.as_fd());
236
237 reuse_result
238 }
239 }
240
// `OpenMapMut` offers a strict superset of `OpenMap`'s functionality, so
// expose the read-only API through `Deref` rather than duplicating it.
impl<'obj> Deref for OpenMapMut<'obj> {
    type Target = OpenMap<'obj>;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `OpenMapImpl` is `repr(transparent)` and so in-memory
        // representation of both types is the same.
        unsafe { transmute::<&OpenMapMut<'obj>, &OpenMap<'obj>>(self) }
    }
}
250
// Implemented for both `OpenMap` and `OpenMapMut` via the generic marker `T`.
impl<T> AsRawLibbpf for OpenMapImpl<'_, T> {
    type LibbpfType = libbpf_sys::bpf_map;

    /// Retrieve the underlying [`libbpf_sys::bpf_map`].
    fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
        self.ptr
    }
}
259
map_fd(map: NonNull<libbpf_sys::bpf_map>) -> Option<RawFd>260 pub(crate) fn map_fd(map: NonNull<libbpf_sys::bpf_map>) -> Option<RawFd> {
261 let fd = unsafe { libbpf_sys::bpf_map__fd(map.as_ptr()) };
262 let fd = util::parse_ret_i32(fd).ok().map(|fd| fd as RawFd);
263 fd
264 }
265
266 /// Return the size of one value including padding for interacting with per-cpu
267 /// maps. The values are aligned to 8 bytes.
percpu_aligned_value_size<M>(map: &M) -> usize where M: MapCore + ?Sized,268 fn percpu_aligned_value_size<M>(map: &M) -> usize
269 where
270 M: MapCore + ?Sized,
271 {
272 let val_size = map.value_size() as usize;
273 util::roundup(val_size, 8)
274 }
275
276 /// Returns the size of the buffer needed for a lookup/update of a per-cpu map.
percpu_buffer_size<M>(map: &M) -> Result<usize> where M: MapCore + ?Sized,277 fn percpu_buffer_size<M>(map: &M) -> Result<usize>
278 where
279 M: MapCore + ?Sized,
280 {
281 let aligned_val_size = percpu_aligned_value_size(map);
282 let ncpu = crate::num_possible_cpus()?;
283 Ok(ncpu * aligned_val_size)
284 }
285
286 /// Apply a key check and return a null pointer in case of dealing with queue/stack/bloom-filter
287 /// map, before passing the key to the bpf functions that support the map of type
288 /// queue/stack/bloom-filter.
map_key<M>(map: &M, key: &[u8]) -> *const c_void where M: MapCore + ?Sized,289 fn map_key<M>(map: &M, key: &[u8]) -> *const c_void
290 where
291 M: MapCore + ?Sized,
292 {
293 // For all they keyless maps we null out the key per documentation of libbpf
294 if map.key_size() == 0 && map.map_type().is_keyless() {
295 return ptr::null();
296 }
297
298 key.as_ptr() as *const c_void
299 }
300
/// Internal function to return a value from a map into a buffer of the given size.
///
/// Returns `Ok(None)` when the key is not present in the map; any other
/// kernel-side failure is surfaced as an error.
fn lookup_raw<M>(map: &M, key: &[u8], flags: MapFlags, out_size: usize) -> Result<Option<Vec<u8>>>
where
    M: MapCore + ?Sized,
{
    // Validate the key length up front: the kernel reads exactly `key_size`
    // bytes from the pointer we hand it.
    if key.len() != map.key_size() as usize {
        return Err(Error::with_invalid_data(format!(
            "key_size {} != {}",
            key.len(),
            map.key_size()
        )));
    };

    // The buffer is deliberately left uninitialized; its length is only set
    // after the kernel has filled it in (see below).
    let mut out: Vec<u8> = Vec::with_capacity(out_size);

    let ret = unsafe {
        libbpf_sys::bpf_map_lookup_elem_flags(
            map.as_fd().as_raw_fd(),
            // `map_key` substitutes NULL for keyless map types.
            map_key(map, key),
            out.as_mut_ptr() as *mut c_void,
            flags.bits(),
        )
    };

    if ret == 0 {
        // SAFETY: The lookup succeeded, so the kernel wrote the
        // caller-specified `out_size` bytes into the buffer.
        unsafe {
            out.set_len(out_size);
        }
        Ok(Some(out))
    } else {
        let err = io::Error::last_os_error();
        if err.kind() == io::ErrorKind::NotFound {
            // A missing key is not an error; report it as `None`.
            Ok(None)
        } else {
            Err(Error::from(err))
        }
    }
}
339
340 /// Internal function to update a map. This does not check the length of the
341 /// supplied value.
update_raw<M>(map: &M, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()> where M: MapCore + ?Sized,342 fn update_raw<M>(map: &M, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()>
343 where
344 M: MapCore + ?Sized,
345 {
346 if key.len() != map.key_size() as usize {
347 return Err(Error::with_invalid_data(format!(
348 "key_size {} != {}",
349 key.len(),
350 map.key_size()
351 )));
352 };
353
354 let ret = unsafe {
355 libbpf_sys::bpf_map_update_elem(
356 map.as_fd().as_raw_fd(),
357 map_key(map, key),
358 value.as_ptr() as *const c_void,
359 flags.bits(),
360 )
361 };
362
363 util::parse_ret(ret)
364 }
365
#[allow(clippy::wildcard_imports)]
mod private {
    use super::*;

    /// Sealing trait that prevents crates outside this one from implementing
    /// [`MapCore`](super::MapCore).
    pub trait Sealed {}

    impl<T> Sealed for MapImpl<'_, T> {}
    impl Sealed for MapHandle {}
}
375
376 /// A trait representing core functionality common to fully initialized maps.
377 pub trait MapCore: Debug + AsFd + private::Sealed {
378 /// Retrieve the map's name.
name(&self) -> &OsStr379 fn name(&self) -> &OsStr;
380
381 /// Retrieve type of the map.
map_type(&self) -> MapType382 fn map_type(&self) -> MapType;
383
384 /// Retrieve the size of the map's keys.
key_size(&self) -> u32385 fn key_size(&self) -> u32;
386
387 /// Retrieve the size of the map's values.
value_size(&self) -> u32388 fn value_size(&self) -> u32;
389
390 /// Fetch extra map information
391 #[inline]
info(&self) -> Result<MapInfo>392 fn info(&self) -> Result<MapInfo> {
393 MapInfo::new(self.as_fd())
394 }
395
396 /// Returns an iterator over keys in this map
397 ///
398 /// Note that if the map is not stable (stable meaning no updates or deletes) during iteration,
399 /// iteration can skip keys, restart from the beginning, or duplicate keys. In other words,
400 /// iteration becomes unpredictable.
keys(&self) -> MapKeyIter<'_>401 fn keys(&self) -> MapKeyIter<'_> {
402 MapKeyIter::new(self.as_fd(), self.key_size())
403 }
404
405 /// Returns map value as `Vec` of `u8`.
406 ///
407 /// `key` must have exactly [`Self::key_size()`] elements.
408 ///
409 /// If the map is one of the per-cpu data structures, the function [`Self::lookup_percpu()`]
410 /// must be used.
411 /// If the map is of type bloom_filter the function [`Self::lookup_bloom_filter()`] must be used
lookup(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<u8>>>412 fn lookup(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<u8>>> {
413 if self.map_type().is_bloom_filter() {
414 return Err(Error::with_invalid_data(
415 "lookup_bloom_filter() must be used for bloom filter maps",
416 ));
417 }
418 if self.map_type().is_percpu() {
419 return Err(Error::with_invalid_data(format!(
420 "lookup_percpu() must be used for per-cpu maps (type of the map is {:?})",
421 self.map_type(),
422 )));
423 }
424
425 let out_size = self.value_size() as usize;
426 lookup_raw(self, key, flags, out_size)
427 }
428
429 /// Returns if the given value is likely present in bloom_filter as `bool`.
430 ///
431 /// `value` must have exactly [`Self::value_size()`] elements.
lookup_bloom_filter(&self, value: &[u8]) -> Result<bool>432 fn lookup_bloom_filter(&self, value: &[u8]) -> Result<bool> {
433 let ret = unsafe {
434 libbpf_sys::bpf_map_lookup_elem(
435 self.as_fd().as_raw_fd(),
436 ptr::null(),
437 value.to_vec().as_mut_ptr() as *mut c_void,
438 )
439 };
440
441 if ret == 0 {
442 Ok(true)
443 } else {
444 let err = io::Error::last_os_error();
445 if err.kind() == io::ErrorKind::NotFound {
446 Ok(false)
447 } else {
448 Err(Error::from(err))
449 }
450 }
451 }
452
453 /// Returns one value per cpu as `Vec` of `Vec` of `u8` for per per-cpu maps.
454 ///
455 /// For normal maps, [`Self::lookup()`] must be used.
lookup_percpu(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<Vec<u8>>>>456 fn lookup_percpu(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<Vec<u8>>>> {
457 if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
458 return Err(Error::with_invalid_data(format!(
459 "lookup() must be used for maps that are not per-cpu (type of the map is {:?})",
460 self.map_type(),
461 )));
462 }
463
464 let val_size = self.value_size() as usize;
465 let aligned_val_size = percpu_aligned_value_size(self);
466 let out_size = percpu_buffer_size(self)?;
467
468 let raw_res = lookup_raw(self, key, flags, out_size)?;
469 if let Some(raw_vals) = raw_res {
470 let mut out = Vec::new();
471 for chunk in raw_vals.chunks_exact(aligned_val_size) {
472 out.push(chunk[..val_size].to_vec());
473 }
474 Ok(Some(out))
475 } else {
476 Ok(None)
477 }
478 }
479
480 /// Deletes an element from the map.
481 ///
482 /// `key` must have exactly [`Self::key_size()`] elements.
delete(&self, key: &[u8]) -> Result<()>483 fn delete(&self, key: &[u8]) -> Result<()> {
484 if key.len() != self.key_size() as usize {
485 return Err(Error::with_invalid_data(format!(
486 "key_size {} != {}",
487 key.len(),
488 self.key_size()
489 )));
490 };
491
492 let ret = unsafe {
493 libbpf_sys::bpf_map_delete_elem(self.as_fd().as_raw_fd(), key.as_ptr() as *const c_void)
494 };
495 util::parse_ret(ret)
496 }
497
498 /// Deletes many elements in batch mode from the map.
499 ///
500 /// `keys` must have exactly `Self::key_size() * count` elements.
delete_batch( &self, keys: &[u8], count: u32, elem_flags: MapFlags, flags: MapFlags, ) -> Result<()>501 fn delete_batch(
502 &self,
503 keys: &[u8],
504 count: u32,
505 elem_flags: MapFlags,
506 flags: MapFlags,
507 ) -> Result<()> {
508 if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
509 return Err(Error::with_invalid_data(format!(
510 "batch key_size {} != {} * {}",
511 keys.len(),
512 self.key_size(),
513 count
514 )));
515 };
516
517 #[allow(clippy::needless_update)]
518 let opts = libbpf_sys::bpf_map_batch_opts {
519 sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
520 elem_flags: elem_flags.bits(),
521 flags: flags.bits(),
522 // bpf_map_batch_opts might have padding fields on some platform
523 ..Default::default()
524 };
525
526 let mut count = count;
527 let ret = unsafe {
528 libbpf_sys::bpf_map_delete_batch(
529 self.as_fd().as_raw_fd(),
530 keys.as_ptr() as *const c_void,
531 &mut count,
532 &opts as *const libbpf_sys::bpf_map_batch_opts,
533 )
534 };
535 util::parse_ret(ret)
536 }
537
538 /// Same as [`Self::lookup()`] except this also deletes the key from the map.
539 ///
540 /// Note that this operation is currently only implemented in the kernel for [`MapType::Queue`]
541 /// and [`MapType::Stack`].
542 ///
543 /// `key` must have exactly [`Self::key_size()`] elements.
lookup_and_delete(&self, key: &[u8]) -> Result<Option<Vec<u8>>>544 fn lookup_and_delete(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
545 if key.len() != self.key_size() as usize {
546 return Err(Error::with_invalid_data(format!(
547 "key_size {} != {}",
548 key.len(),
549 self.key_size()
550 )));
551 };
552
553 let mut out: Vec<u8> = Vec::with_capacity(self.value_size() as usize);
554
555 let ret = unsafe {
556 libbpf_sys::bpf_map_lookup_and_delete_elem(
557 self.as_fd().as_raw_fd(),
558 map_key(self, key),
559 out.as_mut_ptr() as *mut c_void,
560 )
561 };
562
563 if ret == 0 {
564 unsafe {
565 out.set_len(self.value_size() as usize);
566 }
567 Ok(Some(out))
568 } else {
569 let err = io::Error::last_os_error();
570 if err.kind() == io::ErrorKind::NotFound {
571 Ok(None)
572 } else {
573 Err(Error::from(err))
574 }
575 }
576 }
577
578 /// Update an element.
579 ///
580 /// `key` must have exactly [`Self::key_size()`] elements. `value` must have exactly
581 /// [`Self::value_size()`] elements.
582 ///
583 /// For per-cpu maps, [`Self::update_percpu()`] must be used.
update(&self, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()>584 fn update(&self, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()> {
585 if self.map_type().is_percpu() {
586 return Err(Error::with_invalid_data(format!(
587 "update_percpu() must be used for per-cpu maps (type of the map is {:?})",
588 self.map_type(),
589 )));
590 }
591
592 if value.len() != self.value_size() as usize {
593 return Err(Error::with_invalid_data(format!(
594 "value_size {} != {}",
595 value.len(),
596 self.value_size()
597 )));
598 };
599
600 update_raw(self, key, value, flags)
601 }
602
603 /// Updates many elements in batch mode in the map
604 ///
605 /// `keys` must have exactly `Self::key_size() * count` elements. `values` must have exactly
606 /// `Self::key_size() * count` elements.
update_batch( &self, keys: &[u8], values: &[u8], count: u32, elem_flags: MapFlags, flags: MapFlags, ) -> Result<()>607 fn update_batch(
608 &self,
609 keys: &[u8],
610 values: &[u8],
611 count: u32,
612 elem_flags: MapFlags,
613 flags: MapFlags,
614 ) -> Result<()> {
615 if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
616 return Err(Error::with_invalid_data(format!(
617 "batch key_size {} != {} * {}",
618 keys.len(),
619 self.key_size(),
620 count
621 )));
622 };
623
624 if values.len() as u32 / count != self.value_size() || (values.len() as u32) % count != 0 {
625 return Err(Error::with_invalid_data(format!(
626 "batch value_size {} != {} * {}",
627 values.len(),
628 self.value_size(),
629 count
630 )));
631 }
632
633 #[allow(clippy::needless_update)]
634 let opts = libbpf_sys::bpf_map_batch_opts {
635 sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
636 elem_flags: elem_flags.bits(),
637 flags: flags.bits(),
638 // bpf_map_batch_opts might have padding fields on some platform
639 ..Default::default()
640 };
641
642 let mut count = count;
643 let ret = unsafe {
644 libbpf_sys::bpf_map_update_batch(
645 self.as_fd().as_raw_fd(),
646 keys.as_ptr() as *const c_void,
647 values.as_ptr() as *const c_void,
648 &mut count,
649 &opts as *const libbpf_sys::bpf_map_batch_opts,
650 )
651 };
652
653 util::parse_ret(ret)
654 }
655
656 /// Update an element in an per-cpu map with one value per cpu.
657 ///
658 /// `key` must have exactly [`Self::key_size()`] elements. `value` must have one
659 /// element per cpu (see [`num_possible_cpus`][crate::num_possible_cpus])
660 /// with exactly [`Self::value_size()`] elements each.
661 ///
662 /// For per-cpu maps, [`Self::update_percpu()`] must be used.
update_percpu(&self, key: &[u8], values: &[Vec<u8>], flags: MapFlags) -> Result<()>663 fn update_percpu(&self, key: &[u8], values: &[Vec<u8>], flags: MapFlags) -> Result<()> {
664 if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
665 return Err(Error::with_invalid_data(format!(
666 "update() must be used for maps that are not per-cpu (type of the map is {:?})",
667 self.map_type(),
668 )));
669 }
670
671 if values.len() != crate::num_possible_cpus()? {
672 return Err(Error::with_invalid_data(format!(
673 "number of values {} != number of cpus {}",
674 values.len(),
675 crate::num_possible_cpus()?
676 )));
677 };
678
679 let val_size = self.value_size() as usize;
680 let aligned_val_size = percpu_aligned_value_size(self);
681 let buf_size = percpu_buffer_size(self)?;
682
683 let mut value_buf = vec![0; buf_size];
684
685 for (i, val) in values.iter().enumerate() {
686 if val.len() != val_size {
687 return Err(Error::with_invalid_data(format!(
688 "value size for cpu {} is {} != {}",
689 i,
690 val.len(),
691 val_size
692 )));
693 }
694
695 value_buf[(i * aligned_val_size)..(i * aligned_val_size + val_size)]
696 .copy_from_slice(val);
697 }
698
699 update_raw(self, key, &value_buf, flags)
700 }
701 }
702
703 /// An immutable loaded BPF map.
704 pub type Map<'obj> = MapImpl<'obj>;
705 /// A mutable loaded BPF map.
706 pub type MapMut<'obj> = MapImpl<'obj, Mut>;
707
708 /// Represents a libbpf-created map.
709 ///
710 /// Some methods require working with raw bytes. You may find libraries such as
711 /// [`plain`](https://crates.io/crates/plain) helpful.
712 #[derive(Debug)]
713 pub struct MapImpl<'obj, T = ()> {
714 ptr: NonNull<libbpf_sys::bpf_map>,
715 _phantom: PhantomData<&'obj T>,
716 }
717
impl<'obj> Map<'obj> {
    /// Create a [`Map`] from a [`libbpf_sys::bpf_map`].
    ///
    /// # Panics
    /// Panics if the provided map does not have a file descriptor.
    pub fn new(map: &'obj libbpf_sys::bpf_map) -> Self {
        // SAFETY: We inferred the address from a reference, which is always
        // valid.
        let ptr = unsafe { NonNull::new_unchecked(map as *const _ as *mut _) };
        assert!(
            map_fd(ptr).is_some(),
            "provided BPF map does not have file descriptor"
        );

        Self {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// Create a [`Map`] from a [`libbpf_sys::bpf_map`] that does not contain a
    /// file descriptor.
    ///
    /// The caller has to ensure that the [`AsFd`] impl is not used, or a panic
    /// will be the result.
    ///
    /// # Safety
    ///
    /// The pointer must point to a loaded map.
    #[doc(hidden)]
    pub unsafe fn from_map_without_fd(ptr: NonNull<libbpf_sys::bpf_map>) -> Self {
        Self {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// Returns whether map is pinned or not flag
    pub fn is_pinned(&self) -> bool {
        // SAFETY: We ensured `ptr` is valid during construction.
        unsafe { libbpf_sys::bpf_map__is_pinned(self.ptr.as_ptr()) }
    }

    /// Returns the pin_path if the map is pinned, otherwise, None is returned
    pub fn get_pin_path(&self) -> Option<&OsStr> {
        // SAFETY: We ensured `ptr` is valid during construction.
        let path_ptr = unsafe { libbpf_sys::bpf_map__pin_path(self.ptr.as_ptr()) };
        if path_ptr.is_null() {
            // means map is not pinned
            return None;
        }
        // SAFETY: `path_ptr` was just checked to be non-NULL and points to a
        // NUL-terminated C string.
        let path_c_str = unsafe { CStr::from_ptr(path_ptr) };
        Some(OsStr::from_bytes(path_c_str.to_bytes()))
    }
}
768
impl<'obj> MapMut<'obj> {
    /// Create a [`MapMut`] from a [`libbpf_sys::bpf_map`].
    ///
    /// # Panics
    /// Panics if the provided map does not have a file descriptor.
    pub fn new_mut(map: &'obj mut libbpf_sys::bpf_map) -> Self {
        // SAFETY: We inferred the address from a reference, which is always
        // valid.
        let ptr = unsafe { NonNull::new_unchecked(map as *mut _) };
        assert!(
            map_fd(ptr).is_some(),
            "provided BPF map does not have file descriptor"
        );

        Self {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
    /// this map to bpffs.
    pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        let path_ptr = path_c.as_ptr();

        // SAFETY: We ensured `ptr` is valid during construction; `path_ptr`
        // points into the NUL-terminated `path_c`, which outlives the call.
        let ret = unsafe { libbpf_sys::bpf_map__pin(self.ptr.as_ptr(), path_ptr) };
        util::parse_ret(ret)
    }

    /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
    /// this map from bpffs.
    pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        let path_ptr = path_c.as_ptr();
        // SAFETY: We ensured `ptr` is valid during construction; `path_ptr`
        // points into the NUL-terminated `path_c`, which outlives the call.
        let ret = unsafe { libbpf_sys::bpf_map__unpin(self.ptr.as_ptr(), path_ptr) };
        util::parse_ret(ret)
    }

    /// Attach a struct ops map
    pub fn attach_struct_ops(&mut self) -> Result<Link> {
        // Only struct_ops maps can be attached this way; reject anything else
        // up front with a descriptive error.
        if self.map_type() != MapType::StructOps {
            return Err(Error::with_invalid_data(format!(
                "Invalid map type ({:?}) for attach_struct_ops()",
                self.map_type(),
            )));
        }

        let ptr = unsafe { libbpf_sys::bpf_map__attach_struct_ops(self.ptr.as_ptr()) };
        let ptr = validate_bpf_ret(ptr).context("failed to attach struct_ops")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }
}
821
// `MapMut` offers a strict superset of `Map`'s functionality, so expose the
// read-only API through `Deref` rather than duplicating it.
impl<'obj> Deref for MapMut<'obj> {
    type Target = Map<'obj>;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `MapImpl`'s only non-zero-sized field is the `NonNull`
        // pointer; the mutability marker appears solely inside `PhantomData`,
        // so both instantiations share the same in-memory representation.
        unsafe { transmute::<&MapMut<'obj>, &Map<'obj>>(self) }
    }
}
829
830 impl<T> AsFd for MapImpl<'_, T> {
831 #[inline]
as_fd(&self) -> BorrowedFd<'_>832 fn as_fd(&self) -> BorrowedFd<'_> {
833 // SANITY: Our map must always have a file descriptor associated with
834 // it.
835 let fd = map_fd(self.ptr).unwrap();
836 // SAFETY: `fd` is guaranteed to be valid for the lifetime of
837 // the created object.
838 let fd = unsafe { BorrowedFd::borrow_raw(fd as _) };
839 fd
840 }
841 }
842
impl<T> MapCore for MapImpl<'_, T>
where
    T: Debug,
{
    fn name(&self) -> &OsStr {
        // SAFETY: We ensured `ptr` is valid during construction.
        let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
        // SAFETY: `bpf_map__name` can return NULL but only if it's passed
        // NULL. We know `ptr` is not NULL.
        let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
        OsStr::from_bytes(name_c_str.to_bytes())
    }

    #[inline]
    fn map_type(&self) -> MapType {
        // SAFETY: We ensured `ptr` is valid during construction.
        let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
        MapType::from(ty)
    }

    #[inline]
    fn key_size(&self) -> u32 {
        // SAFETY: We ensured `ptr` is valid during construction.
        unsafe { libbpf_sys::bpf_map__key_size(self.ptr.as_ptr()) }
    }

    #[inline]
    fn value_size(&self) -> u32 {
        // SAFETY: We ensured `ptr` is valid during construction.
        unsafe { libbpf_sys::bpf_map__value_size(self.ptr.as_ptr()) }
    }
}
872
// NOTE(review): unlike the impl for `OpenMapImpl`, this one is not generic
// over the mutability marker; `MapMut` reaches it through `Deref`. Confirm
// this asymmetry is intentional.
impl AsRawLibbpf for Map<'_> {
    type LibbpfType = libbpf_sys::bpf_map;

    /// Retrieve the underlying [`libbpf_sys::bpf_map`].
    #[inline]
    fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
        self.ptr
    }
}
882
/// A handle to a map. Handles can be duplicated and dropped.
///
/// While possible to [created directly][MapHandle::create], in many cases it is
/// useful to create such a handle from an existing [`Map`]:
/// ```no_run
/// # use libbpf_rs::Map;
/// # use libbpf_rs::MapHandle;
/// # let get_map = || -> &Map { todo!() };
/// let map: &Map = get_map();
/// let map_handle = MapHandle::try_from(map).unwrap();
/// ```
///
/// Some methods require working with raw bytes. You may find libraries such as
/// [`plain`](https://crates.io/crates/plain) helpful.
#[derive(Debug)]
pub struct MapHandle {
    // Owned file descriptor referring to the map; closed on drop.
    fd: OwnedFd,
    // Map attributes cached when the handle was created.
    name: OsString,
    ty: MapType,
    key_size: u32,
    value_size: u32,
}
905
906 impl MapHandle {
    /// Create a bpf map whose data is not managed by libbpf.
    ///
    /// # Errors
    /// Fails if `name` contains interior NUL bytes or if the kernel rejects
    /// the map creation.
    pub fn create<T: AsRef<OsStr>>(
        map_type: MapType,
        name: Option<T>,
        key_size: u32,
        value_size: u32,
        max_entries: u32,
        opts: &libbpf_sys::bpf_map_create_opts,
    ) -> Result<Self> {
        let name = match name {
            Some(name) => name.as_ref().to_os_string(),
            // Old kernels do not support specifying a map name; an empty
            // name is translated into a NULL pointer below.
            None => OsString::new(),
        };
        let name_c_str = CString::new(name.as_bytes()).map_err(|_| {
            Error::with_invalid_data(format!("invalid name `{name:?}`: has NUL bytes"))
        })?;
        let name_c_ptr = if name.is_empty() {
            ptr::null()
        } else {
            name_c_str.as_bytes_with_nul().as_ptr()
        };

        // SAFETY: `name_c_ptr` is either NULL or points into `name_c_str`,
        // which outlives the call; all other arguments are plain values.
        let fd = unsafe {
            libbpf_sys::bpf_map_create(
                map_type.into(),
                name_c_ptr.cast(),
                key_size,
                value_size,
                max_entries,
                opts,
            )
        };
        let () = util::parse_ret(fd)?;

        Ok(Self {
            // SAFETY: A file descriptor coming from the `bpf_map_create`
            // function is always suitable for ownership and can be
            // cleaned up with close.
            fd: unsafe { OwnedFd::from_raw_fd(fd) },
            name,
            ty: map_type,
            key_size,
            value_size,
        })
    }
953
954 /// Open a previously pinned map from its path.
955 ///
956 /// # Panics
957 /// If the path contains null bytes.
from_pinned_path<P: AsRef<Path>>(path: P) -> Result<Self>958 pub fn from_pinned_path<P: AsRef<Path>>(path: P) -> Result<Self> {
959 fn inner(path: &Path) -> Result<MapHandle> {
960 let p = CString::new(path.as_os_str().as_bytes()).expect("path contained null bytes");
961 let fd = parse_ret_i32(unsafe {
962 // SAFETY
963 // p is never null since we allocated ourselves.
964 libbpf_sys::bpf_obj_get(p.as_ptr())
965 })?;
966 MapHandle::from_fd(unsafe {
967 // SAFETY
968 // A file descriptor coming from the bpf_obj_get function is always suitable for
969 // ownership and can be cleaned up with close.
970 OwnedFd::from_raw_fd(fd)
971 })
972 }
973
974 inner(path.as_ref())
975 }
976
977 /// Open a loaded map from its map id.
from_map_id(id: u32) -> Result<Self>978 pub fn from_map_id(id: u32) -> Result<Self> {
979 parse_ret_i32(unsafe {
980 // SAFETY
981 // This function is always safe to call.
982 libbpf_sys::bpf_map_get_fd_by_id(id)
983 })
984 .map(|fd| unsafe {
985 // SAFETY
986 // A file descriptor coming from the bpf_map_get_fd_by_id function is always suitable
987 // for ownership and can be cleaned up with close.
988 OwnedFd::from_raw_fd(fd)
989 })
990 .and_then(Self::from_fd)
991 }
992
from_fd(fd: OwnedFd) -> Result<Self>993 fn from_fd(fd: OwnedFd) -> Result<Self> {
994 let info = MapInfo::new(fd.as_fd())?;
995 Ok(Self {
996 fd,
997 name: info.name()?.into(),
998 ty: info.map_type(),
999 key_size: info.info.key_size,
1000 value_size: info.info.value_size,
1001 })
1002 }
1003
1004 /// Freeze the map as read-only from user space.
1005 ///
1006 /// Entries from a frozen map can no longer be updated or deleted with the
1007 /// bpf() system call. This operation is not reversible, and the map remains
1008 /// immutable from user space until its destruction. However, read and write
1009 /// permissions for BPF programs to the map remain unchanged.
freeze(&self) -> Result<()>1010 pub fn freeze(&self) -> Result<()> {
1011 let ret = unsafe { libbpf_sys::bpf_map_freeze(self.fd.as_raw_fd()) };
1012
1013 util::parse_ret(ret)
1014 }
1015
1016 /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
1017 /// this map to bpffs.
pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()>1018 pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
1019 let path_c = util::path_to_cstring(path)?;
1020 let path_ptr = path_c.as_ptr();
1021
1022 let ret = unsafe { libbpf_sys::bpf_obj_pin(self.fd.as_raw_fd(), path_ptr) };
1023 util::parse_ret(ret)
1024 }
1025
1026 /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
1027 /// this map from bpffs.
unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()>1028 pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
1029 remove_file(path).context("failed to remove pin map")
1030 }
1031 }
1032
1033 impl MapCore for MapHandle {
1034 #[inline]
name(&self) -> &OsStr1035 fn name(&self) -> &OsStr {
1036 &self.name
1037 }
1038
1039 #[inline]
map_type(&self) -> MapType1040 fn map_type(&self) -> MapType {
1041 self.ty
1042 }
1043
1044 #[inline]
key_size(&self) -> u321045 fn key_size(&self) -> u32 {
1046 self.key_size
1047 }
1048
1049 #[inline]
value_size(&self) -> u321050 fn value_size(&self) -> u32 {
1051 self.value_size
1052 }
1053 }
1054
impl AsFd for MapHandle {
    /// Borrow the map's underlying file descriptor.
    #[inline]
    fn as_fd(&self) -> BorrowedFd<'_> {
        self.fd.as_fd()
    }
}
1061
1062 impl<T> TryFrom<&MapImpl<'_, T>> for MapHandle
1063 where
1064 T: Debug,
1065 {
1066 type Error = Error;
1067
try_from(other: &MapImpl<'_, T>) -> Result<Self>1068 fn try_from(other: &MapImpl<'_, T>) -> Result<Self> {
1069 Ok(Self {
1070 fd: other
1071 .as_fd()
1072 .try_clone_to_owned()
1073 .context("failed to duplicate map file descriptor")?,
1074 name: other.name().to_os_string(),
1075 ty: other.map_type(),
1076 key_size: other.key_size(),
1077 value_size: other.value_size(),
1078 })
1079 }
1080 }
1081
1082 impl TryFrom<&MapHandle> for MapHandle {
1083 type Error = Error;
1084
try_from(other: &MapHandle) -> Result<Self>1085 fn try_from(other: &MapHandle) -> Result<Self> {
1086 Ok(Self {
1087 fd: other
1088 .as_fd()
1089 .try_clone_to_owned()
1090 .context("failed to duplicate map file descriptor")?,
1091 name: other.name().to_os_string(),
1092 ty: other.map_type(),
1093 key_size: other.key_size(),
1094 value_size: other.value_size(),
1095 })
1096 }
1097 }
1098
bitflags! {
    /// Flags to configure [`Map`] operations.
    ///
    /// These mirror the `BPF_*` flag constants accepted by the `bpf(2)` map
    /// commands, as re-exported by `libbpf_sys`.
    #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
    pub struct MapFlags: u64 {
        /// See [`libbpf_sys::BPF_ANY`].
        const ANY = libbpf_sys::BPF_ANY as _;
        /// See [`libbpf_sys::BPF_NOEXIST`].
        const NO_EXIST = libbpf_sys::BPF_NOEXIST as _;
        /// See [`libbpf_sys::BPF_EXIST`].
        const EXIST = libbpf_sys::BPF_EXIST as _;
        /// See [`libbpf_sys::BPF_F_LOCK`].
        const LOCK = libbpf_sys::BPF_F_LOCK as _;
    }
}
1113
/// Type of a [`Map`]. Maps to `enum bpf_map_type` in kernel uapi.
///
/// Each variant's discriminant equals the corresponding
/// `libbpf_sys::BPF_MAP_TYPE_*` constant, so conversions to and from `u32`
/// are plain casts (see the `From` impls for this type).
// If you add a new per-cpu map, also update `is_percpu`.
#[non_exhaustive]
#[repr(u32)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
// TODO: Document members.
#[allow(missing_docs)]
pub enum MapType {
    Unspec = libbpf_sys::BPF_MAP_TYPE_UNSPEC,
    Hash = libbpf_sys::BPF_MAP_TYPE_HASH,
    Array = libbpf_sys::BPF_MAP_TYPE_ARRAY,
    ProgArray = libbpf_sys::BPF_MAP_TYPE_PROG_ARRAY,
    PerfEventArray = libbpf_sys::BPF_MAP_TYPE_PERF_EVENT_ARRAY,
    PercpuHash = libbpf_sys::BPF_MAP_TYPE_PERCPU_HASH,
    PercpuArray = libbpf_sys::BPF_MAP_TYPE_PERCPU_ARRAY,
    StackTrace = libbpf_sys::BPF_MAP_TYPE_STACK_TRACE,
    CgroupArray = libbpf_sys::BPF_MAP_TYPE_CGROUP_ARRAY,
    LruHash = libbpf_sys::BPF_MAP_TYPE_LRU_HASH,
    LruPercpuHash = libbpf_sys::BPF_MAP_TYPE_LRU_PERCPU_HASH,
    LpmTrie = libbpf_sys::BPF_MAP_TYPE_LPM_TRIE,
    ArrayOfMaps = libbpf_sys::BPF_MAP_TYPE_ARRAY_OF_MAPS,
    HashOfMaps = libbpf_sys::BPF_MAP_TYPE_HASH_OF_MAPS,
    Devmap = libbpf_sys::BPF_MAP_TYPE_DEVMAP,
    Sockmap = libbpf_sys::BPF_MAP_TYPE_SOCKMAP,
    Cpumap = libbpf_sys::BPF_MAP_TYPE_CPUMAP,
    Xskmap = libbpf_sys::BPF_MAP_TYPE_XSKMAP,
    Sockhash = libbpf_sys::BPF_MAP_TYPE_SOCKHASH,
    CgroupStorage = libbpf_sys::BPF_MAP_TYPE_CGROUP_STORAGE,
    ReuseportSockarray = libbpf_sys::BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
    PercpuCgroupStorage = libbpf_sys::BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
    Queue = libbpf_sys::BPF_MAP_TYPE_QUEUE,
    Stack = libbpf_sys::BPF_MAP_TYPE_STACK,
    SkStorage = libbpf_sys::BPF_MAP_TYPE_SK_STORAGE,
    DevmapHash = libbpf_sys::BPF_MAP_TYPE_DEVMAP_HASH,
    StructOps = libbpf_sys::BPF_MAP_TYPE_STRUCT_OPS,
    RingBuf = libbpf_sys::BPF_MAP_TYPE_RINGBUF,
    InodeStorage = libbpf_sys::BPF_MAP_TYPE_INODE_STORAGE,
    TaskStorage = libbpf_sys::BPF_MAP_TYPE_TASK_STORAGE,
    BloomFilter = libbpf_sys::BPF_MAP_TYPE_BLOOM_FILTER,
    UserRingBuf = libbpf_sys::BPF_MAP_TYPE_USER_RINGBUF,
    /// We choose to specify our own "unknown" type here b/c it's really up to the kernel
    /// to decide if it wants to reject the map. If it accepts it, it just means whoever
    /// using this library is a bit out of date.
    Unknown = u32::MAX,
}
1159
1160 impl MapType {
1161 /// Returns if the map is of one of the per-cpu types.
is_percpu(&self) -> bool1162 pub fn is_percpu(&self) -> bool {
1163 matches!(
1164 self,
1165 MapType::PercpuArray
1166 | MapType::PercpuHash
1167 | MapType::LruPercpuHash
1168 | MapType::PercpuCgroupStorage
1169 )
1170 }
1171
1172 /// Returns if the map is keyless map type as per documentation of libbpf
1173 /// Keyless map types are: Queues, Stacks and Bloom Filters
is_keyless(&self) -> bool1174 fn is_keyless(&self) -> bool {
1175 matches!(self, MapType::Queue | MapType::Stack | MapType::BloomFilter)
1176 }
1177
1178 /// Returns if the map is of bloom filter type
is_bloom_filter(&self) -> bool1179 pub fn is_bloom_filter(&self) -> bool {
1180 MapType::BloomFilter.eq(self)
1181 }
1182
1183 /// Detects if host kernel supports this BPF map type.
1184 ///
1185 /// Make sure the process has required set of CAP_* permissions (or runs as
1186 /// root) when performing feature checking.
is_supported(&self) -> Result<bool>1187 pub fn is_supported(&self) -> Result<bool> {
1188 let ret = unsafe { libbpf_sys::libbpf_probe_bpf_map_type(*self as u32, ptr::null()) };
1189 match ret {
1190 0 => Ok(false),
1191 1 => Ok(true),
1192 _ => Err(Error::from_raw_os_error(-ret)),
1193 }
1194 }
1195 }
1196
impl From<u32> for MapType {
    /// Convert a raw `bpf_map_type` value into the corresponding variant.
    ///
    /// Values not known to this library map to [`MapType::Unknown`].
    fn from(value: u32) -> Self {
        use MapType::*;

        // Match guards (`x if x == Variant as u32`) are used because the
        // discriminants are defined in terms of `libbpf_sys` constants,
        // which cannot appear directly as match patterns.
        match value {
            x if x == Unspec as u32 => Unspec,
            x if x == Hash as u32 => Hash,
            x if x == Array as u32 => Array,
            x if x == ProgArray as u32 => ProgArray,
            x if x == PerfEventArray as u32 => PerfEventArray,
            x if x == PercpuHash as u32 => PercpuHash,
            x if x == PercpuArray as u32 => PercpuArray,
            x if x == StackTrace as u32 => StackTrace,
            x if x == CgroupArray as u32 => CgroupArray,
            x if x == LruHash as u32 => LruHash,
            x if x == LruPercpuHash as u32 => LruPercpuHash,
            x if x == LpmTrie as u32 => LpmTrie,
            x if x == ArrayOfMaps as u32 => ArrayOfMaps,
            x if x == HashOfMaps as u32 => HashOfMaps,
            x if x == Devmap as u32 => Devmap,
            x if x == Sockmap as u32 => Sockmap,
            x if x == Cpumap as u32 => Cpumap,
            x if x == Xskmap as u32 => Xskmap,
            x if x == Sockhash as u32 => Sockhash,
            x if x == CgroupStorage as u32 => CgroupStorage,
            x if x == ReuseportSockarray as u32 => ReuseportSockarray,
            x if x == PercpuCgroupStorage as u32 => PercpuCgroupStorage,
            x if x == Queue as u32 => Queue,
            x if x == Stack as u32 => Stack,
            x if x == SkStorage as u32 => SkStorage,
            x if x == DevmapHash as u32 => DevmapHash,
            x if x == StructOps as u32 => StructOps,
            x if x == RingBuf as u32 => RingBuf,
            x if x == InodeStorage as u32 => InodeStorage,
            x if x == TaskStorage as u32 => TaskStorage,
            x if x == BloomFilter as u32 => BloomFilter,
            x if x == UserRingBuf as u32 => UserRingBuf,
            _ => Unknown,
        }
    }
}
1238
impl From<MapType> for u32 {
    /// Convert a [`MapType`] into the raw `u32` understood by the kernel.
    fn from(value: MapType) -> Self {
        // The enum is `#[repr(u32)]`, so this cast is lossless.
        value as u32
    }
}
1244
/// An iterator over the keys of a BPF map.
#[derive(Debug)]
pub struct MapKeyIter<'map> {
    // Borrowed fd of the map being iterated; keeps the map borrowed for 'map.
    map_fd: BorrowedFd<'map>,
    // The previously yielded key, if any; used as the kernel's cursor for
    // `bpf_map_get_next_key`.
    prev: Option<Vec<u8>>,
    // Scratch buffer (key-size bytes) the kernel writes the next key into.
    next: Vec<u8>,
}
1252
1253 impl<'map> MapKeyIter<'map> {
new(map_fd: BorrowedFd<'map>, key_size: u32) -> Self1254 fn new(map_fd: BorrowedFd<'map>, key_size: u32) -> Self {
1255 Self {
1256 map_fd,
1257 prev: None,
1258 next: vec![0; key_size as usize],
1259 }
1260 }
1261 }
1262
1263 impl Iterator for MapKeyIter<'_> {
1264 type Item = Vec<u8>;
1265
next(&mut self) -> Option<Self::Item>1266 fn next(&mut self) -> Option<Self::Item> {
1267 let prev = self.prev.as_ref().map_or(ptr::null(), |p| p.as_ptr());
1268
1269 let ret = unsafe {
1270 libbpf_sys::bpf_map_get_next_key(
1271 self.map_fd.as_raw_fd(),
1272 prev as _,
1273 self.next.as_mut_ptr() as _,
1274 )
1275 };
1276 if ret != 0 {
1277 None
1278 } else {
1279 self.prev = Some(self.next.clone());
1280 Some(self.next.clone())
1281 }
1282 }
1283 }
1284
/// A convenience wrapper for [`bpf_map_info`][libbpf_sys::bpf_map_info]. It
/// provides the ability to retrieve the details of a certain map.
///
/// The wrapped structure is populated by the kernel via
/// `bpf_obj_get_info_by_fd` (see [`MapInfo::new`]).
#[derive(Debug)]
pub struct MapInfo {
    /// The inner [`bpf_map_info`][libbpf_sys::bpf_map_info] object.
    pub info: bpf_map_info,
}
1292
1293 impl MapInfo {
1294 /// Create a `MapInfo` object from a fd.
new(fd: BorrowedFd<'_>) -> Result<Self>1295 pub fn new(fd: BorrowedFd<'_>) -> Result<Self> {
1296 let mut map_info = bpf_map_info::default();
1297 let mut size = mem::size_of_val(&map_info) as u32;
1298 // SAFETY: All pointers are derived from references and hence valid.
1299 let () = util::parse_ret(unsafe {
1300 bpf_obj_get_info_by_fd(
1301 fd.as_raw_fd(),
1302 &mut map_info as *mut bpf_map_info as *mut c_void,
1303 &mut size as *mut u32,
1304 )
1305 })?;
1306 Ok(Self { info: map_info })
1307 }
1308
1309 /// Get the map type
1310 #[inline]
map_type(&self) -> MapType1311 pub fn map_type(&self) -> MapType {
1312 MapType::from(self.info.type_)
1313 }
1314
1315 /// Get the name of this map.
1316 ///
1317 /// Returns error if the underlying data in the structure is not a valid
1318 /// utf-8 string.
name<'a>(&self) -> Result<&'a str>1319 pub fn name<'a>(&self) -> Result<&'a str> {
1320 // SAFETY: convert &[i8] to &[u8], and then cast that to &str. i8 and u8 has the same size.
1321 let char_slice =
1322 unsafe { from_raw_parts(self.info.name[..].as_ptr().cast(), self.info.name.len()) };
1323
1324 util::c_char_slice_to_cstr(char_slice)
1325 .ok_or_else(|| Error::with_invalid_data("no nul byte found"))?
1326 .to_str()
1327 .map_err(Error::with_invalid_data)
1328 }
1329
1330 /// Get the map flags.
1331 #[inline]
flags(&self) -> MapFlags1332 pub fn flags(&self) -> MapFlags {
1333 MapFlags::from_bits_truncate(self.info.map_flags as u64)
1334 }
1335 }
1336
#[cfg(test)]
mod tests {
    use super::*;

    use std::mem::discriminant;

    /// Check that `MapType` <-> `u32` conversions round-trip for every known
    /// variant (including `Unknown`), in both directions.
    #[test]
    fn map_type() {
        use MapType::*;

        for t in [
            Unspec,
            Hash,
            Array,
            ProgArray,
            PerfEventArray,
            PercpuHash,
            PercpuArray,
            StackTrace,
            CgroupArray,
            LruHash,
            LruPercpuHash,
            LpmTrie,
            ArrayOfMaps,
            HashOfMaps,
            Devmap,
            Sockmap,
            Cpumap,
            Xskmap,
            Sockhash,
            CgroupStorage,
            ReuseportSockarray,
            PercpuCgroupStorage,
            Queue,
            Stack,
            SkStorage,
            DevmapHash,
            StructOps,
            RingBuf,
            InodeStorage,
            TaskStorage,
            BloomFilter,
            UserRingBuf,
            Unknown,
        ] {
            // check if discriminants match after a roundtrip conversion
            assert_eq!(discriminant(&t), discriminant(&MapType::from(t as u32)));
            // also exercise the `From<MapType> for u32` impl, which the
            // original test never covered
            assert_eq!(u32::from(t), t as u32);
        }
    }
}
1387