
Searched full:allocator (Results 1 – 25 of 1115) sorted by relevance


/kernel/linux/linux-5.10/drivers/iommu/
ioasid.c:3 * I/O Address Space ID allocator. There is one global IOASID space, split into
22 * about an allocator. There are two types of allocators:
24 * - Default allocator always has its own XArray to track the IOASIDs allocated.
29 * 1. Default allocator is always available, not dynamically registered. This is
32 * 2. Custom allocators take precedence over the default allocator.
38 * 5. When switching between custom allocator and default allocator, all IOASIDs
39 * must be freed to ensure unadulterated space for the new allocator.
41 * @ops: allocator helper functions and its data
44 * @flags: attributes of the allocator
46 * @rcu: used for kfree_rcu when unregistering allocator
[all …]
/kernel/linux/linux-6.6/rust/alloc/vec/
partial_eq.rs:3 use crate::alloc::Allocator;
25 __impl_slice_eq1! { [A1: Allocator, A2: Allocator] Vec<T, A1>, Vec<U, A2>, #[stable(feature = "rust…
26 __impl_slice_eq1! { [A: Allocator] Vec<T, A>, &[U], #[stable(feature = "rust1", since = "1.0.0")] }
27 __impl_slice_eq1! { [A: Allocator] Vec<T, A>, &mut [U], #[stable(feature = "rust1", since = "1.0.0"…
28 __impl_slice_eq1! { [A: Allocator] &[T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice…
29 __impl_slice_eq1! { [A: Allocator] &mut [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_s…
30 __impl_slice_eq1! { [A: Allocator] Vec<T, A>, [U], #[stable(feature = "partialeq_vec_for_slice", si…
31 __impl_slice_eq1! { [A: Allocator] [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_slice", si…
33 __impl_slice_eq1! { [A: Allocator] Cow<'_, [T]>, Vec<U, A> where T: Clone, #[stable(feature = "rust…
38 __impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, [U; N], #[stable(feature = "rust1", s…
[all …]
spec_extend.rs:3 use crate::alloc::Allocator;
22 impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
31 impl<T, I, A: Allocator> TrySpecExtend<T, I> for Vec<T, A>
41 impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
50 impl<T, I, A: Allocator> TrySpecExtend<T, I> for Vec<T, A>
60 impl<T, A: Allocator> SpecExtend<T, IntoIter<T>> for Vec<T, A> {
69 impl<T, A: Allocator> TrySpecExtend<T, IntoIter<T>> for Vec<T, A> {
80 impl<'a, T: 'a, I, A: Allocator> SpecExtend<&'a T, I> for Vec<T, A>
90 impl<'a, T: 'a, I, A: Allocator> TrySpecExtend<&'a T, I> for Vec<T, A>
101 impl<'a, T: 'a, A: Allocator> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
[all …]
drain.rs:3 use crate::alloc::{Allocator, Global};
27 #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global,
39 impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
45 impl<'a, T, A: Allocator> Drain<'a, T, A> {
63 /// Returns a reference to the underlying allocator.
67 pub fn allocator(&self) -> &A { in allocator() method
68 unsafe { self.vec.as_ref().allocator() } in allocator()
142 impl<'a, T, A: Allocator> AsRef<[T]> for Drain<'a, T, A> {
149 unsafe impl<T: Sync, A: Sync + Allocator> Sync for Drain<'_, T, A> {}
151 unsafe impl<T: Send, A: Send + Allocator> Send for Drain<'_, T, A> {}
[all …]
into_iter.rs:5 use crate::alloc::{Allocator, Global};
37 #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
43 // to avoid dropping the allocator twice we need to wrap it into ManuallyDrop
52 impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> {
58 impl<T, A: Allocator> IntoIter<T, A> {
93 /// Returns a reference to the underlying allocator.
96 pub fn allocator(&self) -> &A { in allocator() method
148 // Keep our `Drop` impl from dropping the elements and the allocator in into_vecdeque()
174 impl<T, A: Allocator> AsRef<[T]> for IntoIter<T, A> {
181 unsafe impl<T: Send, A: Allocator + Send> Send for IntoIter<T, A> {}
[all …]
extract_if.rs:3 use crate::alloc::{Allocator, Global};
29 #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
44 impl<T, F, A: Allocator> ExtractIf<'_, T, F, A>
48 /// Returns a reference to the underlying allocator.
51 pub fn allocator(&self) -> &A { in allocator() function
52 self.vec.allocator() in allocator()
57 impl<T, F, A: Allocator> Iterator for ExtractIf<'_, T, F, A>
93 impl<T, F, A: Allocator> Drop for ExtractIf<'_, T, F, A>
/kernel/linux/linux-6.6/mm/
Kconfig:141 prompt "Default allocator"
145 Selects the default allocator for the compressed cache for
158 Use the zbud allocator as the default allocator.
164 Use the z3fold allocator as the default allocator.
173 Use the zsmalloc allocator as the default allocator.
185 tristate "2:1 compression allocator (zbud)"
188 A special purpose allocator for storing compressed pages.
195 tristate "3:1 compression allocator (z3fold) (DEPRECATED)"
202 A special purpose allocator for storing compressed pages.
215 prompt "N:1 compression allocator (zsmalloc)" if ZSWAP
[all …]
/kernel/linux/linux-6.6/fs/nilfs2/
alloc.c:3 * NILFS dat/inode allocator
23 * @inode: inode of metadata file using this allocator
34 * @inode: inode of metadata file using this allocator
43 * nilfs_palloc_init_blockgroup - initialize private variables for allocator
44 * @inode: inode of metadata file using this allocator
78 * @inode: inode of metadata file using this allocator
93 * @inode: inode of metadata file using this allocator
109 * @inode: inode of metadata file using this allocator
162 * @inode: inode of metadata file using this allocator
234 * nilfs_palloc_delete_block - delete a block on the persistent allocator file
[all …]
/kernel/linux/linux-5.10/fs/nilfs2/
alloc.c:3 * alloc.c - NILFS dat/inode allocator
23 * @inode: inode of metadata file using this allocator
34 * @inode: inode of metadata file using this allocator
43 * nilfs_palloc_init_blockgroup - initialize private variables for allocator
44 * @inode: inode of metadata file using this allocator
78 * @inode: inode of metadata file using this allocator
93 * @inode: inode of metadata file using this allocator
109 * @inode: inode of metadata file using this allocator
162 * @inode: inode of metadata file using this allocator
234 * nilfs_palloc_delete_block - delete a block on the persistent allocator file
[all …]
/kernel/linux/linux-5.10/include/linux/
ioasid.h:18 * struct ioasid_allocator_ops - IOASID allocator helper functions and data
23 * @pdata: data belong to the allocator, provided when calling alloc()
40 int ioasid_register_allocator(struct ioasid_allocator_ops *allocator);
41 void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator);
61 static inline int ioasid_register_allocator(struct ioasid_allocator_ops *allocator) in ioasid_register_allocator() argument
66 static inline void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator) in ioasid_unregister_allocator() argument
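
Taken together, the ioasid.c and ioasid.h hits above describe one global IOASID space served either by the built-in XArray-backed default allocator or by a custom allocator registered through ioasid_register_allocator(). A minimal sketch of such a registration, assuming the alloc/free callback layout suggested by the struct ioasid_allocator_ops comments; the IDA-backed callbacks and all my_* names are purely illustrative:

#include <linux/idr.h>
#include <linux/init.h>
#include <linux/ioasid.h>

/* Back the custom allocator with a plain IDA, purely for illustration. */
static DEFINE_IDA(my_pasid_ida);

static ioasid_t my_ioasid_alloc(ioasid_t min, ioasid_t max, void *data)
{
	int id = ida_alloc_range(&my_pasid_ida, min, max, GFP_ATOMIC);

	return id < 0 ? INVALID_IOASID : id;
}

static void my_ioasid_free(ioasid_t ioasid, void *data)
{
	ida_free(&my_pasid_ida, ioasid);
}

static struct ioasid_allocator_ops my_ioasid_ops = {
	.alloc = my_ioasid_alloc,
	.free  = my_ioasid_free,
};

static int __init my_ioasid_init(void)
{
	/* Custom allocators take precedence over the default XArray allocator. */
	return ioasid_register_allocator(&my_ioasid_ops);
}

static void __exit my_ioasid_exit(void)
{
	/* All IOASIDs must be freed before switching back to the default. */
	ioasid_unregister_allocator(&my_ioasid_ops);
}
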
/kernel/linux/linux-6.6/rust/alloc/
alloc.rs:21 // These are the magic symbols to call the global allocator. rustc generates
44 /// The global memory allocator.
46 /// This type implements the [`Allocator`] trait by forwarding calls
47 /// to the allocator registered with the `#[global_allocator]` attribute
60 /// Allocate memory with the global allocator.
63 /// of the allocator registered with the `#[global_allocator]` attribute
67 /// of the [`Global`] type when it and the [`Allocator`] trait become stable.
96 // Make sure we don't accidentally allow omitting the allocator shim in in alloc()
104 /// Deallocate memory with the global allocator.
107 /// of the allocator registered with the `#[global_allocator]` attribute
[all …]
boxed.rs:57 //! For non-zero-sized values, a [`Box`] will use the [`Global`] allocator for
59 //! raw pointer allocated with the [`Global`] allocator, given that the
60 //! [`Layout`] used with the allocator is correct for the type. More precisely,
61 //! a `value: *mut T` that has been allocated with the [`Global`] allocator
65 //! [`Global`] allocator with [`Layout::for_value(&*value)`].
116 //! free the value with the global allocator. In general, the best practice
118 //! allocator.
172 use crate::alloc::{AllocError, Allocator, Global, Layout};
201 #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
364 impl<T, A: Allocator> Box<T, A> {
[all …]
raw_vec.rs:14 use crate::alloc::{Allocator, Global, Layout};
43 /// * Uses the excess returned from the allocator to use the largest available capacity.
53 pub(crate) struct RawVec<T, A: Allocator = Global> {
106 impl<T, A: Allocator> RawVec<T, A> {
120 /// Like `new`, but parameterized over the choice of allocator for
128 /// allocator for the returned `RawVec`.
136 /// allocator for the returned `RawVec`.
143 /// of allocator for the returned `RawVec`.
161 /// an allocator could overallocate and return a greater memory block than requested.
236 /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
[all …]
/kernel/linux/linux-5.10/net/core/
xdp.c:89 static void mem_allocator_disconnect(void *allocator) in mem_allocator_disconnect() argument
101 if (xa->allocator == allocator) in mem_allocator_disconnect()
229 /* Allocate a cyclic ID that maps to allocator pointer.
243 /* Cyclic allocator, reset next id */ in __mem_id_cyclic_get()
269 void *allocator) in __xdp_reg_mem_model() argument
281 if (!allocator) { in __xdp_reg_mem_model()
308 xdp_alloc->allocator = allocator; in __xdp_reg_mem_model()
310 /* Insert allocator into ID lookup table */ in __xdp_reg_mem_model()
320 page_pool_use_xdp_mem(allocator, mem_allocator_disconnect); in __xdp_reg_mem_model()
332 enum xdp_mem_type type, void *allocator) in xdp_reg_mem_model() argument
[all …]
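
The xdp.c hits above are the core that maps a cyclic mem_id to an allocator pointer and later disconnects it. On the driver side the allocator is typically a page_pool; a hedged sketch against the linux-5.10 include layout (my_rxq_setup_pool and its parameters are illustrative, and the rxq is assumed to be already registered):

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/numa.h>
#include <net/page_pool.h>	/* moved under net/page_pool/ in newer trees */
#include <net/xdp.h>

static int my_rxq_setup_pool(struct xdp_rxq_info *rxq, struct device *dev,
			     unsigned int pool_size)
{
	struct page_pool_params pp_params = {
		.order     = 0,			/* single pages */
		.pool_size = pool_size,		/* pages kept for recycling */
		.nid       = NUMA_NO_NODE,
		.dev       = dev,
		.dma_dir   = DMA_FROM_DEVICE,
	};
	struct page_pool *pool;
	int err;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	/* Hands the allocator pointer to the mem_id lookup table set up above. */
	err = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
	if (err)
		page_pool_destroy(pool);

	return err;
}
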
/kernel/linux/linux-5.10/Documentation/core-api/
memory-allocation.rst:10 or you can directly request pages from the page allocator with
33 zones can be used, how hard the allocator should try to find free
90 useful to understand how hard the page allocator will try to satisfy that
112 **default** page allocator behavior is used. That means that not costly
117 * ``GFP_KERNEL | __GFP_NORETRY`` - overrides the default allocator behavior
122 * ``GFP_KERNEL | __GFP_RETRY_MAYFAIL`` - overrides the default allocator
127 * ``GFP_KERNEL | __GFP_NOFAIL`` - overrides the default allocator behavior
131 Selecting memory allocator
151 request pages from the page allocator. The memory allocated by `vmalloc`
163 cache allocator. The cache should be set up with kmem_cache_create() or
/kernel/linux/linux-6.6/Documentation/core-api/
memory-allocation.rst:10 or you can directly request pages from the page allocator with
33 zones can be used, how hard the allocator should try to find free
90 useful to understand how hard the page allocator will try to satisfy that
112 **default** page allocator behavior is used. That means that not costly
117 * ``GFP_KERNEL | __GFP_NORETRY`` - overrides the default allocator behavior
122 * ``GFP_KERNEL | __GFP_RETRY_MAYFAIL`` - overrides the default allocator
127 * ``GFP_KERNEL | __GFP_NOFAIL`` - overrides the default allocator behavior
131 Selecting memory allocator
155 request pages from the page allocator. The memory allocated by `vmalloc`
167 cache allocator. The cache should be set up with kmem_cache_create() or
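
Both memory-allocation.rst hits cover the same ground: GFP flags tune how hard the page allocator tries, kmalloc() serves small physically contiguous buffers, vmalloc() serves large virtually contiguous ones, and frequently allocated fixed-size objects belong in a slab cache. A brief illustrative sketch tying those recommendations together (my_record, my_cache and my_alloc_examples are made-up names):

#include <linux/gfp.h>
#include <linux/mm.h>		/* kvfree() */
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Illustrative object type and its slab cache. */
struct my_record {
	u64 key;
	u64 val;
};

static struct kmem_cache *my_cache;

static int my_alloc_examples(size_t big_len)
{
	struct my_record *rec;
	void *big;

	/* Small, physically contiguous allocation; may sleep and reclaim. */
	rec = kmalloc(sizeof(*rec), GFP_KERNEL);
	if (!rec)
		return -ENOMEM;

	/*
	 * __GFP_NORETRY overrides the default allocator behaviour and gives up
	 * quickly, so fall back to vmalloc() for a large buffer that only
	 * needs to be virtually contiguous.
	 */
	big = kmalloc(big_len, GFP_KERNEL | __GFP_NORETRY);
	if (!big)
		big = vmalloc(big_len);
	if (!big) {
		kfree(rec);
		return -ENOMEM;
	}

	/* Frequently allocated fixed-size objects get a dedicated slab cache. */
	my_cache = kmem_cache_create("my_record_cache", sizeof(struct my_record),
				     0, SLAB_HWCACHE_ALIGN, NULL);

	kvfree(big);	/* kvfree() handles both kmalloc() and vmalloc() memory */
	kfree(rec);
	return my_cache ? 0 : -ENOMEM;
}
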
/kernel/linux/linux-6.6/include/drm/
drm_mm.h:149 * struct drm_mm_node - allocated block in the DRM allocator
151 * This represents an allocated block in a &drm_mm allocator. Except for
183 * struct drm_mm - DRM allocator
185 * DRM range allocator with a few special functions and features geared towards
219 * struct drm_mm_scan - DRM allocator eviction roaster data
264 * drm_mm_initialized - checks whether an allocator is initialized
358 * @mm: &drm_mm allocator to walk
360 * This iterator walks over all nodes in the range allocator. It is implemented
370 * @mm: &drm_mm allocator to walk
372 * This iterator walks over all nodes in the range allocator. It is implemented
[all …]
/kernel/linux/linux-5.10/include/drm/
drm_mm.h:147 * struct drm_mm_node - allocated block in the DRM allocator
149 * This represents an allocated block in a &drm_mm allocator. Except for
181 * struct drm_mm - DRM allocator
183 * DRM range allocator with a few special functions and features geared towards
217 * struct drm_mm_scan - DRM allocator eviction roaster data
262 * drm_mm_initialized - checks whether an allocator is initialized
356 * @mm: &drm_mm allocator to walk
358 * This iterator walks over all nodes in the range allocator. It is implemented
368 * @mm: &drm_mm allocator to walk
370 * This iterator walks over all nodes in the range allocator. It is implemented
[all …]
/kernel/linux/linux-5.10/Documentation/trace/
events-kmem.rst:11 - Per-CPU Allocator Activity
55 a simple indicator of page allocator activity. Pages may be allocated from
56 the per-CPU allocator (high performance) or the buddy allocator.
58 If pages are allocated directly from the buddy allocator, the
74 4. Per-CPU Allocator Activity
81 In front of the page allocator is a per-cpu page allocator. It exists only
/kernel/linux/linux-6.6/Documentation/trace/
events-kmem.rst:11 - Per-CPU Allocator Activity
55 a simple indicator of page allocator activity. Pages may be allocated from
56 the per-CPU allocator (high performance) or the buddy allocator.
58 If pages are allocated directly from the buddy allocator, the
74 4. Per-CPU Allocator Activity
81 In front of the page allocator is a per-cpu page allocator. It exists only
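
Both events-kmem.rst hits describe the same flow: order-0 allocations are normally served from the per-cpu page allocator sitting in front of the buddy allocator, and the kmem tracepoints record that activity. A tiny sketch of the page-level API those events instrument (my_grab_pages/my_release_pages are illustrative wrappers):

#include <linux/gfp.h>
#include <linux/mm_types.h>

/*
 * Allocate 2^order contiguous pages. Order-0 requests are usually satisfied
 * from the per-cpu lists; higher orders fall through to the buddy allocator.
 * Either path is visible via the kmem:mm_page_alloc trace event.
 */
static struct page *my_grab_pages(unsigned int order)
{
	return alloc_pages(GFP_KERNEL, order);
}

static void my_release_pages(struct page *page, unsigned int order)
{
	__free_pages(page, order);
}
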
/kernel/linux/linux-5.10/mm/
Kconfig:288 reliably. The page allocator relies on compaction heavily and
302 free pages from the buddy allocator for the purpose of reporting
417 allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
419 the excess and return it to the allocator.
422 system allocator, which can cause extra fragmentation, particularly
491 # UP and nommu archs use km based percpu allocator
537 bool "Contiguous Memory Allocator"
542 This enables the Contiguous Memory Allocator which allows other
685 prompt "Compressed cache for swap pages default allocator"
689 Selects the default allocator for the compressed cache for
[all …]
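
Among the options in this Kconfig hit is the default allocator for zswap's compressed cache (zbud, z3fold or zsmalloc). For context, a hedged sketch of the zsmalloc side of that choice as it looks in the 5.10 tree quoted here; zs_malloc()'s failure reporting changed in later kernels, and my_zs_roundtrip is a made-up example:

#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/zsmalloc.h>

/* Store one compressed buffer in a zsmalloc pool and tear everything down. */
static int my_zs_roundtrip(const void *src, size_t len)
{
	struct zs_pool *pool;
	unsigned long handle;
	void *dst;

	pool = zs_create_pool("my_zs_pool");
	if (!pool)
		return -ENOMEM;

	/* Handles are opaque; objects may be packed N:1 into backing pages. */
	handle = zs_malloc(pool, len, GFP_KERNEL);
	if (!handle) {		/* 0 means failure in 5.10 */
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	/* Objects must be mapped before they can be accessed. */
	dst = zs_map_object(pool, handle, ZS_MM_WO);
	memcpy(dst, src, len);
	zs_unmap_object(pool, handle);

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}
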
/kernel/linux/linux-5.10/include/trace/events/
xdp.h:318 __field(const void *, allocator)
325 __entry->allocator = xa->allocator;
328 TP_printk("mem_id=%d mem_type=%s allocator=%p",
331 __entry->allocator
346 __field(const void *, allocator)
355 __entry->allocator = xa->allocator;
360 TP_printk("mem_id=%d mem_type=%s allocator=%p"
364 __entry->allocator,
/kernel/linux/linux-6.6/include/trace/events/
xdp.h:331 __field(const void *, allocator)
338 __entry->allocator = xa->allocator;
341 TP_printk("mem_id=%d mem_type=%s allocator=%p",
344 __entry->allocator
359 __field(const void *, allocator)
368 __entry->allocator = xa->allocator;
373 TP_printk("mem_id=%d mem_type=%s allocator=%p"
377 __entry->allocator,
/kernel/linux/linux-6.6/drivers/gpu/drm/
drm_mm.c:56 * drm_mm provides a simple range allocator. The drivers are free to use the
57 * resource allocator from the linux core if it suits them, the upside of drm_mm
67 * The range allocator also supports reservation of preallocated blocks. This is
71 * after the allocator is initialized, which helps with avoiding looped
93 * some basic allocator dumpers for debugging.
95 * Note that this range allocator is not thread-safe, drivers need to protect
438 * @mm: drm_mm allocator to insert @node into
441 * This functions inserts an already set-up &drm_mm_node into the allocator,
443 * fields must be cleared to 0. This is useful to initialize the allocator with
444 * preallocated objects which must be set-up before the range allocator can be
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/
drm_mm.c:56 * drm_mm provides a simple range allocator. The drivers are free to use the
57 * resource allocator from the linux core if it suits them, the upside of drm_mm
67 * The range allocator also supports reservation of preallocated blocks. This is
71 * after the allocator is initialized, which helps with avoiding looped
93 * some basic allocator dumpers for debugging.
95 * Note that this range allocator is not thread-safe, drivers need to protect
441 * @mm: drm_mm allocator to insert @node into
444 * This functions inserts an already set-up &drm_mm_node into the allocator,
446 * fields must be cleared to 0. This is useful to initialize the allocator with
447 * preallocated objects which must be set-up before the range allocator can be
[all …]
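
The drm_mm.c and drm_mm.h hits describe a driver-embedded range allocator (plus an eviction scan helper) that is explicitly not thread-safe. A hedged usage sketch of the basic init/insert/remove cycle, with an illustrative 256 MiB range and made-up wrapper names; a real driver would guard these calls with its own lock:

#include <drm/drm_mm.h>

/* One allocator instance managing a 256 MiB range starting at offset 0. */
static struct drm_mm my_mm;

static void my_mm_setup(void)
{
	drm_mm_init(&my_mm, 0, 256 << 20);
}

/* Carve @size bytes out of the range; @node should be zero-initialized. */
static int my_mm_alloc(struct drm_mm_node *node, u64 size)
{
	return drm_mm_insert_node(&my_mm, node, size);
}

static void my_mm_free(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

static void my_mm_teardown(void)
{
	/* All nodes must have been removed before takedown. */
	drm_mm_takedown(&my_mm);
}
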
