// Copyright (c) 2023 Huawei Device Co., Ltd.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! ## `slab` allocates storage space for values of a single data type
//!
//! `slab` pre-allocates space for the stored data.
//! When the amount of stored data exceeds the pre-allocated space,
//! `slab` follows a growth strategy similar to the [`vec`][vec] module:
//! whenever new space is needed, the `slab` grows by a factor of **two**.
//!
//! ### Page
//!
//! The primary storage in `slab` is essentially a two-dimensional array:
//! each `page` holds a [`vec`][vec] container of slots. The first `page`
//! has a length of 32, and each `page` added thereafter doubles the
//! length of the previous one. The total number of `pages` is 19.
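//! For example, page 0 holds 32 slots, page 1 holds 64, page 2 holds 128,
//! and the last page (page 18) holds 32 * 2^18 slots, so a full `slab` can
//! address 32 * (2^19 - 1) slots in total.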
//!
//! ### Release
//!
//! When a piece of data in `slab` is no longer in use and is freed,
//! the space it occupies should be reused; this release operation
//! works in conjunction with the allocation operation.
//!
//! ### Allocate
//!
//! There are two cases of space allocation in `slab`. In the first case,
//! the space has never been allocated before, so normal space allocation
//! is performed on the container and its parameters are updated. In the
//! other case, allocation works together with the release operation:
//! when an allocation is performed again, the space previously occupied
//! by the freed data is reused.
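//! (Concretely, releasing a slot pushes its index onto the owning page's
//! free list, and the next allocation on that page pops it off again.)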
//!
//! ### Compact
//!
//! `compact` is used to clean up the resources in the `slab` container
//! after a specific number of loops, which is one of the most important
//! uses of this container: it frees space that has been allocated but is
//! not currently in use.
//!
//! [vec]: https://doc.rust-lang.org/std/vec/index.html
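//!
//! # Examples
//!
//! A minimal usage sketch with a hypothetical `Task` entry type, shown for
//! illustration only (the snippet is marked `ignore`, so it is not compiled
//! as a doc-test):
//!
//! ```ignore
//! use std::sync::atomic::{AtomicUsize, Ordering};
//!
//! use crate::util::slab::{Entry, Slab};
//!
//! #[derive(Default)]
//! struct Task {
//!     id: AtomicUsize,
//! }
//!
//! impl Entry for Task {
//!     fn reset(&self) {
//!         self.id.store(0, Ordering::SeqCst);
//!     }
//! }
//!
//! let mut slab = Slab::<Task>::new();
//! // `allocate` is unsafe because it hands out references backed by raw pointers.
//! let (addr, task) = unsafe { slab.allocate().unwrap() };
//! task.id.store(1, Ordering::SeqCst);
//! assert!(slab.get(addr).is_some());
//! // Dropping the `Ref` releases the slot so later allocations can reuse it.
//! drop(task);
//! ```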

use std::cell::UnsafeCell;
use std::ops::Deref;
use std::sync::atomic::Ordering::{Relaxed, SeqCst};
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::{Arc, Mutex};

/// The maximum number of `pages` that `Slab` can hold
const NUM_PAGES: usize = 19;

/// The minimum number of `slots` that `page` can hold
const PAGE_INITIAL_SIZE: usize = 32;
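/// The shift amount used by `Address::page` to map an address to its `page`
/// index (log2 of `PAGE_INITIAL_SIZE`, plus one)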
const PAGE_INDEX_SHIFT: u32 = PAGE_INITIAL_SIZE.trailing_zeros() + 1;

/// Trait bound mechanism: a type stored in the `Slab` must implement the
/// `Entry` and `Default` trait methods
pub trait Entry: Default {
    /// Resets the entry.
    fn reset(&self);
}

/// Reference to data stored in `slab`
pub struct Ref<T> {
    value: *const Value<T>,
}

/// Dropping a `Ref` releases the data stored in `slab` so that the slot can
/// be reused by the next allocation
impl<T> Drop for Ref<T> {
    fn drop(&mut self) {
        unsafe {
            let _ = (*self.value).release();
        }
    }
}

/// Provides the dereference operation for user-friendly access
impl<T> Deref for Ref<T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { &(*self.value).value }
    }
}

unsafe impl<T: Sync> Sync for Ref<T> {}
unsafe impl<T: Sync> Send for Ref<T> {}

/// The Address of the stored data.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct Address(usize);

/// Gets the bit size of a pointer.
pub const fn pointer_width() -> u32 {
    std::mem::size_of::<usize>() as u32 * 8
}

impl Address {
    /// Gets the index of the `page` that this address belongs to
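    /// (for example, addresses `0..=31` live on page 0, `32..=95` on page 1,
    /// and `96..=223` on page 2)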
    pub fn page(&self) -> usize {
        let slot_shifted = (self.0 + PAGE_INITIAL_SIZE) >> PAGE_INDEX_SHIFT;
        (pointer_width() - slot_shifted.leading_zeros()) as usize
    }

    /// Convert `Address` to `usize`
    pub const fn as_usize(self) -> usize {
        self.0
    }

    /// Convert `usize` to `Address`
    pub fn from_usize(src: usize) -> Address {
        Address(src)
    }
}

/// Amortized allocation for homogeneous data types.
pub struct Slab<T> {
    /// Essentially a two-dimensional array, the constituent units in the
    /// container
    pages: [Arc<Page<T>>; NUM_PAGES],
}

impl<T: Entry> Default for Slab<T> {
    fn default() -> Slab<T> {
        Slab::new()
    }
}

impl<T: Entry> Slab<T> {
    /// Creates a new `Slab` and sets up its initialization parameters
    pub fn new() -> Slab<T> {
        let mut slab = Slab {
            pages: Default::default(),
        };

        // The minimum number of `slots` that can fit in a `page` at initialization,
        // where the default value is 32
        let mut len = PAGE_INITIAL_SIZE;
        // The sum of the lengths of all `pages` before this `page`, i.e. the sum of
        // `len`
        let mut prev_len: usize = 0;

        for page in &mut slab.pages {
            let page = Arc::get_mut(page).unwrap();
            page.len = len;
            page.prev_len = prev_len;
            // The `len` of each `page` will be doubled from the previous one
            len *= 2;
            prev_len += page.len;
        }

        slab
    }

    /// Creates a new handle sharing the same `pages`, making allocation easy to call
    pub fn handle(&self) -> Slab<T> {
        Slab {
            pages: self.pages.clone(),
        }
    }

    /// Allocates space in the container
    ///
    /// # Safety
    /// 1. Allocating space in the container essentially means performing the
    ///    allocation on each page of the container in turn
    /// 2. Before allocating on a page, the lock is acquired first to prevent
    ///    multiple threads from modifying the state at the same time
    ///
    /// This function operates on raw pointers.
    pub unsafe fn allocate(&self) -> Option<(Address, Ref<T>)> {
        // Find the first available `slot`
        for page in &self.pages[..] {
            if let Some((addr, val)) = Page::allocate(page) {
                return Some((addr, val));
            }
        }

        None
    }

    /// Iterates over the data in the container
    pub fn for_each(&mut self, mut f: impl FnMut(&T)) {
        for page_idx in 0..self.pages.len() {
            let slots = self.pages[page_idx].slots.lock().unwrap();

            for slot_idx in 0..slots.slots.len() {
                unsafe {
                    let slot = slots.slots.as_ptr().add(slot_idx);
                    let value = slot as *const Value<T>;

                    f(&(*value).value);
                }
            }
        }
    }

    /// Gets a reference to the data stored at the given address
    pub fn get(&mut self, addr: Address) -> Option<&T> {
        let page_idx = addr.page();
        let slot_idx = self.pages[page_idx].slot(addr);

        if !self.pages[page_idx].allocated.load(SeqCst) {
            return None;
        }

        unsafe {
            // Fetch by pointer, usage is similar to `C`
            let slot = self.pages[page_idx]
                .slots
                .lock()
                .unwrap()
                .slots
                .as_ptr()
                .add(slot_idx);

            let value = slot as *const Value<T>;

            Some(&(*value).value)
        }
    }

    /// Cleans up the resources in the `Slab` container after a specific
    /// number of loops, which is one of the most important uses of this
    /// container
    ///
    /// # Safety
    /// Releasing resources here does not release resources that are being
    /// used or have not yet been allocated
    /// 1. Releasing each page first checks whether the resources on the
    ///    current page are being used or whether the current page has not
    ///    been allocated
    /// 2. Next, it checks whether the `slots` of the current page are owned
    ///    by another thread, to prevent its resources from changing to the
    ///    used state
    /// 3. Finally, the same checks as in the first step are performed again,
    ///    to guard against state changes and ensure that no erroneous or
    ///    invalid releases are made
    ///
    /// This function relies on atomic variables.
    pub unsafe fn compact(&mut self) {
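        // The first page (`pages[0]`) is deliberately skipped: once allocated,
        // it stays allocated for the lifetime of the `Slab`.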
        for page in self.pages[1..].iter() {
            // If the `slots` of the current `page` are being used, or the current
            // `page` is not allocated, do not clean it up.
            if page.used.load(Relaxed) != 0 || !page.allocated.load(Relaxed) {
                continue;
            }

            // If the current `slots` are owned by another thread, do not clean them up.
            let mut slots = match page.slots.try_lock() {
                Ok(slots) => slots,
                _ => continue,
            };

            // Check again: if the `slots` of the current `page` are being used, or if the
            // current `page` is not allocated, do not clean up.
            if slots.used > 0 || slots.slots.capacity() == 0 {
                continue;
            }

            page.allocated.store(false, Relaxed);

            let vec = std::mem::take(&mut slots.slots);
            slots.head = 0;

            drop(slots);
            drop(vec);
        }
    }
}

struct Page<T> {
    // Number of `slots` currently being used
    pub used: AtomicUsize,
    // Whether the current `page` has been allocated space
    pub allocated: AtomicBool,
    // The number of `slots` that `page` can hold
    pub len: usize,
    // The sum of the lengths of all `pages` before the `page`, i.e. the sum of the number of
    // `slots`
    pub prev_len: usize,
    // `Slots`
    pub slots: Mutex<Slots<T>>,
}

unsafe impl<T: Sync> Sync for Page<T> {}
unsafe impl<T: Sync> Send for Page<T> {}

impl<T> Page<T> {
    // Get the location of the `slot` in the current `page` based on the current
    // `Address`.
    fn slot(&self, addr: Address) -> usize {
        addr.0 - self.prev_len
    }

    // Get the current `Address` based on the `slot` location in the current `page`
    fn addr(&self, slot: usize) -> Address {
        Address(slot + self.prev_len)
    }

    fn release(&self, value: *const Value<T>) {
        let mut locked = self.slots.lock().unwrap();

        // Get the index of the current `slot` based on the `value` pointer
        let idx = locked.index_for(value);
        locked.slots[idx].next = locked.head as u32;
        locked.head = idx;
        locked.used -= 1;

        self.used.store(locked.used, Relaxed);
    }
}

impl<T: Entry> Page<T> {
    unsafe fn allocate(me: &Arc<Page<T>>) -> Option<(Address, Ref<T>)> {
        if me.used.load(Relaxed) == me.len {
            return None;
        }

        let mut locked = me.slots.lock().unwrap();

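        // A previously released slot is available on the page's free list:
        // reuse it instead of growing the page.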
        if locked.head < locked.slots.len() {
            let locked = &mut *locked;

            let idx = locked.head;
            let slot = &locked.slots[idx];

            locked.head = slot.next as usize;

            locked.used += 1;
            me.used.store(locked.used, Relaxed);

            (*slot.value.get()).value.reset();

            Some((me.addr(idx), slot.gen_ref(me)))
        } else if me.len == locked.slots.len() {
            None
        } else {
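            // No released slot to reuse and the page is not yet full: append a
            // new slot, reserving the whole page capacity on first use.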
            let idx = locked.slots.len();

            if idx == 0 {
                locked.slots.reserve_exact(me.len);
            }

            locked.slots.push(Slot {
                value: UnsafeCell::new(Value {
                    value: Default::default(),
                    page: &**me as *const _,
                }),
                next: 0,
            });

            locked.head += 1;
            locked.used += 1;
            me.used.store(locked.used, Relaxed);
            me.allocated.store(true, Relaxed);

            Some((me.addr(idx), locked.slots[idx].gen_ref(me)))
        }
    }
}

impl<T> Default for Page<T> {
    fn default() -> Page<T> {
        Page {
            used: AtomicUsize::new(0),
            allocated: AtomicBool::new(false),
            len: 0,
            prev_len: 0,
            slots: Mutex::new(Slots::new()),
        }
    }
}

struct Slots<T> {
    pub slots: Vec<Slot<T>>,
    pub head: usize,
    pub used: usize,
}

impl<T> Slots<T> {
    fn new() -> Slots<T> {
        Slots {
            slots: Vec::new(),
            head: 0,
            used: 0,
        }
    }

    fn index_for(&self, slot: *const Value<T>) -> usize {
        use std::mem;

        // Get the first address of the current `page`
        let base = &self.slots[0] as *const _ as usize;

        // The case where the first address is 0 and `page` is unallocated
        // if base == 0 {
        //     logerr!("`page` unallocated");
        // }

        // Get the current `slot` address
        let slot = slot as usize;
        // Get `Vec` internal element size
        let width = mem::size_of::<Slot<T>>();

        // if slot < base {
        //     logerr!("wrong address");
        // }

        // Get the current `idx`
        (slot - base) / width
        // if idx >= self.slots.len() as usize {
        //     logerr!("idx out of range");
        // }
    }
}

#[derive(Debug)]
#[repr(C)]
struct Slot<T> {
    pub value: UnsafeCell<Value<T>>,
    pub next: u32,
}

impl<T> Slot<T> {
    fn gen_ref(&self, page: &Arc<Page<T>>) -> Ref<T> {
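        // Bump the `Arc` strong count and forget the clone so the `Page` stays
        // alive while this `Ref` exists; `Value::release` later rebuilds the
        // `Arc` via `Arc::from_raw` to balance the count.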
        std::mem::forget(page.clone());
        let slot = self as *const Slot<T>;
        let value = slot as *const Value<T>;

        Ref { value }
    }
}

#[derive(Debug)]
#[repr(C)]
struct Value<T> {
    pub value: T,
    pub page: *const Page<T>,
}

impl<T> Value<T> {
    unsafe fn release(&self) -> Arc<Page<T>> {
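        // Rebuild the `Arc` that was forgotten in `Slot::gen_ref`, balancing
        // the strong count, then return the slot to the page's free list.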
        let page = Arc::from_raw(self.page);
        page.release(self as *const _);
        page
    }
}

#[cfg(test)]
mod test {
    use std::sync::atomic::AtomicUsize;
    use std::sync::atomic::Ordering::SeqCst;

    use crate::util::slab::{Address, Entry, Slab, NUM_PAGES, PAGE_INITIAL_SIZE};

    struct Foo {
        cnt: AtomicUsize,
        id: AtomicUsize,
    }

    impl Default for Foo {
        fn default() -> Foo {
            Foo {
                cnt: AtomicUsize::new(0),
                id: AtomicUsize::new(0),
            }
        }
    }

    impl Entry for Foo {
        fn reset(&self) {
            self.cnt.fetch_add(1, SeqCst);
        }
    }

    /// UT test cases for Slab::new()
    ///
    /// # Brief
    /// 1. Check the parameters after initialization completes, such as the
    ///    number of pages and the length of each page.
    #[test]
    fn ut_slab_new() {
        let slab = Slab::<Foo>::new();
        assert_eq!(slab.pages.len(), NUM_PAGES);

        for (index, page) in slab.pages.iter().enumerate() {
            assert_eq!(page.len, PAGE_INITIAL_SIZE * 2_usize.pow(index as u32));
        }
    }

    /// UT test cases for Slab::for_each()
    ///
    /// # Brief
    /// 1. Deposit data into the container, then call this function to verify
    ///    that the data is correctly stored and can be matched one by one.
    #[test]
    fn ut_slab_for_each() {
        let mut slab = Slab::<Foo>::new();
        let alloc = slab.handle();

        unsafe {
            // Find the first available `slot` and return its `addr` and `Ref`
            let (_, foo1) = alloc.allocate().unwrap();
            // Modify the current `id`
            foo1.id.store(1, SeqCst);

            // Find the second available `slot` and return its `addr` and `Ref`
            let (_, foo2) = alloc.allocate().unwrap();
            foo2.id.store(2, SeqCst);

            // Find the third available `slot` and return its `addr` and `Ref`
            let (_, foo3) = alloc.allocate().unwrap();
            foo3.id.store(3, SeqCst);
        }

        let mut temp = vec![3, 2, 1];
        slab.for_each(|value| {
            assert_eq!(temp.pop().unwrap(), value.id.load(SeqCst));
        });
    }


    /// UT test cases for Slab::get()
    ///
    /// # Brief
    /// 1. Allocate container space and deposit data, get the data address, and
    ///    see if the data can be fetched.
    /// 2. Create an invalid data address and see if data can be obtained.
    #[test]
    fn ut_slab_get() {
        let mut slab = Slab::<Foo>::new();

        unsafe {
            let (addr, _) = slab.allocate().unwrap();
            assert!(slab.get(addr).is_some());

            let un_addr = Address::from_usize(10000);
            assert!(slab.get(un_addr).is_none());
        }
    }

    /// UT test cases for Slab::compact()
    ///
    /// # Brief
    /// 1. The first page is not set to unallocated, even if it has been
    ///    allocated and is not currently in use.
    /// 2. Pages other than the first page, once allocated and unused, will be
    ///    set to the unallocated status.
    #[test]
    fn ut_slab_compact() {
        let mut slab = Slab::<Foo>::new();
        let mut address = Vec::new();

        unsafe {
            for data in 0..33 {
                let (addr, foo) = slab.allocate().unwrap();
                foo.id.store(data, SeqCst);
                address.push((addr, foo));
            }
            slab.compact();
            assert_eq!(slab.get(address[32].0).unwrap().id.load(SeqCst), 32);
        }

        let mut slab = Slab::<Foo>::new();
        let mut address = Vec::new();

        unsafe {
            assert!(!slab.pages[1].allocated.load(SeqCst));

            for _ in 0..33 {
                let (addr, foo) = slab.allocate().unwrap();
                address.push((addr, foo));
            }
            assert!(slab.pages[1].allocated.load(SeqCst));
            assert_eq!(slab.pages[1].used.load(SeqCst), 1);
            drop(address.pop().unwrap().1);
            assert!(slab.pages[1].allocated.load(SeqCst));
            assert_eq!(slab.pages[1].used.load(SeqCst), 0);
            slab.compact();
            assert!(!slab.pages[1].allocated.load(SeqCst));
        }
    }
}