Home
last modified time | relevance | path

Searched full:allocation (Results 1 – 25 of 4154) sorted by relevance

12345678910>>...167

/kernel/linux/linux-4.19/drivers/acpi/acpica/
Duttrack.c4 * Module Name: uttrack - Memory allocation tracking routines (debug only)
14 * Each memory allocation is tracked via a doubly linked list. Each
32 *allocation);
80 * PARAMETERS: size - Size of the allocation
94 struct acpi_debug_mem_block *allocation; in acpi_ut_allocate_and_track() local
105 allocation = in acpi_ut_allocate_and_track()
107 if (!allocation) { in acpi_ut_allocate_and_track()
109 /* Report allocation error */ in acpi_ut_allocate_and_track()
118 acpi_ut_track_allocation(allocation, size, ACPI_MEM_MALLOC, in acpi_ut_allocate_and_track()
121 acpi_os_free(allocation); in acpi_ut_allocate_and_track()
[all …]
/kernel/linux/linux-5.10/drivers/acpi/acpica/
Duttrack.c4 * Module Name: uttrack - Memory allocation tracking routines (debug only)
14 * Each memory allocation is tracked via a doubly linked list. Each
32 *allocation);
80 * PARAMETERS: size - Size of the allocation
94 struct acpi_debug_mem_block *allocation; in acpi_ut_allocate_and_track() local
105 allocation = in acpi_ut_allocate_and_track()
107 if (!allocation) { in acpi_ut_allocate_and_track()
109 /* Report allocation error */ in acpi_ut_allocate_and_track()
118 acpi_ut_track_allocation(allocation, size, ACPI_MEM_MALLOC, in acpi_ut_allocate_and_track()
121 acpi_os_free(allocation); in acpi_ut_allocate_and_track()
[all …]
/kernel/linux/linux-5.10/tools/testing/selftests/resctrl/
Dmba_test.c3 * Memory Bandwidth Allocation (MBA) test
23 * For each allocation, run 5 times in order to get average values.
27 static int runs_per_allocation, allocation = 100; in mba_setup() local
43 if (allocation < ALLOCATION_MIN || allocation > ALLOCATION_MAX) in mba_setup()
46 sprintf(allocation_str, "%d", allocation); in mba_setup()
49 allocation -= ALLOCATION_STEP; in mba_setup()
56 int allocation, runs; in show_mba_info() local
61 for (allocation = 0; allocation < ALLOCATION_MAX / ALLOCATION_STEP; in show_mba_info()
62 allocation++) { in show_mba_info()
71 for (runs = NUM_OF_RUNS * allocation + 1; in show_mba_info()
[all …]
/kernel/linux/linux-5.10/Documentation/core-api/
Dmemory-allocation.rst4 Memory Allocation Guide
7 Linux provides a variety of APIs for memory allocation. You can
14 Most of the memory allocation APIs use GFP flags to express how that
16 pages", the underlying memory allocation function.
18 Diversity of the allocation APIs combined with the numerous GFP flags
26 Of course there are cases when other allocation APIs and different GFP
45 * If the allocation is performed from an atomic context, e.g interrupt
48 ``GFP_NOWAIT`` allocation is likely to fail. Allocations which
51 will be stressed unless allocation succeeds, you may use ``GFP_ATOMIC``.
66 example may be a hardware allocation that maps data directly into
[all …]
/kernel/linux/linux-4.19/lib/
Dtest_kasan.c38 pr_err("Allocation failed\n"); in kmalloc_oob_right()
54 pr_err("Allocation failed\n"); in kmalloc_oob_left()
70 pr_err("Allocation failed\n"); in kmalloc_node_oob_right()
87 pr_info("kmalloc pagealloc allocation: out-of-bounds to right\n"); in kmalloc_pagealloc_oob_right()
90 pr_err("Allocation failed\n"); in kmalloc_pagealloc_oob_right()
103 pr_info("kmalloc pagealloc allocation: use-after-free\n"); in kmalloc_pagealloc_uaf()
106 pr_err("Allocation failed\n"); in kmalloc_pagealloc_uaf()
119 pr_info("kmalloc pagealloc allocation: invalid-free\n"); in kmalloc_pagealloc_invalid_free()
122 pr_err("Allocation failed\n"); in kmalloc_pagealloc_invalid_free()
137 pr_info("kmalloc large allocation: out-of-bounds to right\n"); in kmalloc_large_oob_right()
[all …]
/kernel/linux/linux-5.10/fs/xfs/libxfs/
Dxfs_ialloc.h22 uint64_t alloc; /* inode phys. allocation bitmap for
40 * To work within the constraint of one allocation per transaction,
42 * allocation to make more free inodes. If an inode is
43 * available without an allocation, agbp would be set to the current
45 * If an allocation needed to be done, agbp would be set to the
46 * inode header of the allocation group and alloc_done set to true.
94 struct xfs_buf *bp, /* allocation group header buffer */
98 * Read in the allocation group header (inode allocation section)
104 xfs_agnumber_t agno, /* allocation group number */
105 struct xfs_buf **bpp); /* allocation group hdr buf */
[all …]
Dxfs_alloc.h20 * Freespace allocation types. Argument to xfs_alloc_[v]extent.
58 xfs_agnumber_t agno; /* allocation group number */
59 xfs_agblock_t agbno; /* allocation group-relative block # */
71 xfs_alloctype_t type; /* allocation type XFS_ALLOCTYPE_... */
72 xfs_alloctype_t otype; /* original allocation type */
74 char wasdel; /* set if allocation was prev delayed */
75 char wasfromfl; /* set if allocation is from freelist */
83 #define XFS_ALLOC_USERDATA (1 << 0)/* allocation is for user data*/
125 * Interface for inode allocation to force the pag data to be initialized.
131 xfs_agnumber_t agno, /* allocation group number */
[all …]
/kernel/linux/linux-4.19/fs/xfs/libxfs/
Dxfs_ialloc.h22 uint64_t alloc; /* inode phys. allocation bitmap for
50 * To work within the constraint of one allocation per transaction,
52 * allocation to make more free inodes. If an inode is
53 * available without an allocation, agbp would be set to the current
55 * If an allocation needed to be done, agbp would be set to the
56 * inode header of the allocation group and alloc_done set to true.
111 struct xfs_buf *bp, /* allocation group header buffer */
115 * Read in the allocation group header (inode allocation section)
121 xfs_agnumber_t agno, /* allocation group number */
122 struct xfs_buf **bpp); /* allocation group hdr buf */
[all …]
Dxfs_alloc.h20 * Freespace allocation types. Argument to xfs_alloc_[v]extent.
59 xfs_agnumber_t agno; /* allocation group number */
60 xfs_agblock_t agbno; /* allocation group-relative block # */
72 xfs_alloctype_t type; /* allocation type XFS_ALLOCTYPE_... */
73 xfs_alloctype_t otype; /* original allocation type */
75 char wasdel; /* set if allocation was prev delayed */
76 char wasfromfl; /* set if allocation is from freelist */
84 #define XFS_ALLOC_USERDATA (1 << 0)/* allocation is for user data*/
86 #define XFS_ALLOC_USERDATA_ZERO (1 << 2)/* zero extent on allocation */
139 * Interface for inode allocation to force the pag data to be initialized.
[all …]
/kernel/linux/linux-5.10/fs/jfs/
Djfs_dmap.c19 * SERIALIZATION of the Block Allocation Map.
21 * the working state of the block allocation map is accessed in
24 * 1) allocation and free requests that start at the dmap
28 * 2) allocation requests that start at dmap control page
46 * the working state of the block allocation map also includes read/
48 * free block count, allocation group level free block counts).
53 * accesses to the persistent state of the block allocation map (limited
139 * FUNCTION: initialize the block allocation map.
216 * FUNCTION: terminate the block allocation map in preparation for
304 * allocation map.
[all …]
Djfs_imap.h21 #define MAXAG 128 /* maximum number of allocation groups */
23 #define AMAPSIZE 512 /* bytes in the IAG allocation maps */
39 * inode allocation map:
41 * inode allocation map consists of
43 * . inode allocation group pages (per 4096 inodes)
47 * inode allocation group page (per 4096 inodes of an AG)
51 __le32 iagnum; /* 4: inode allocation group number */
73 /* allocation bit map: 1 bit per inode (0 - free, 1 - allocated) */
74 __le32 wmap[EXTSPERIAG]; /* 512: working allocation map */
75 __le32 pmap[EXTSPERIAG]; /* 512: persistent allocation map */
[all …]
/kernel/linux/linux-5.10/Documentation/trace/
Devents-kmem.rst5 The kmem tracing system captures events related to object and page allocation
8 - Slab allocation of small objects of unknown type (kmalloc)
9 - Slab allocation of small objects of known type
10 - Page allocation
17 1. Slab allocation of small objects of unknown type
27 internally fragmented as a result of the allocation pattern. By correlating
29 the allocation sites were.
32 2. Slab allocation of small objects of known type
45 3. Page allocation
54 These four events deal with page allocation and freeing. mm_page_alloc is
[all …]
/kernel/linux/linux-4.19/Documentation/trace/
Devents-kmem.rst5 The kmem tracing system captures events related to object and page allocation
8 - Slab allocation of small objects of unknown type (kmalloc)
9 - Slab allocation of small objects of known type
10 - Page allocation
17 1. Slab allocation of small objects of unknown type
27 internally fragmented as a result of the allocation pattern. By correlating
29 the allocation sites were.
32 2. Slab allocation of small objects of known type
45 3. Page allocation
54 These four events deal with page allocation and freeing. mm_page_alloc is
[all …]
/kernel/linux/linux-4.19/fs/jfs/
Djfs_dmap.c32 * SERIALIZATION of the Block Allocation Map.
34 * the working state of the block allocation map is accessed in
37 * 1) allocation and free requests that start at the dmap
41 * 2) allocation requests that start at dmap control page
59 * the working state of the block allocation map also includes read/
61 * free block count, allocation group level free block counts).
66 * accesses to the persistent state of the block allocation map (limited
152 * FUNCTION: initialize the block allocation map.
229 * FUNCTION: terminate the block allocation map in preparation for
317 * allocation map.
[all …]
/kernel/linux/linux-5.10/fs/ocfs2/
Dreservations.h7 * Allocation reservations function prototypes and structures.
84 * allocation mirror bitmap.
115 * @cstart: start of proposed allocation
116 * @clen: length (in clusters) of proposed allocation
123 * On success, zero is returned and the valid allocation area is set in cstart
136 * @cstart: start of allocation in clusters
137 * @clen: end of allocation in clusters.
139 * Tell the reservation code that bits were used to fulfill allocation in
143 reservations bitmap. If resv is passed, its next allocation window will be
/kernel/linux/linux-5.10/mm/
Ddmapool.c47 size_t allocation; member
53 struct dma_page { /* cacheable header for 'allocation' bytes */
95 pages * (pool->allocation / pool->size), in show_pools()
127 * Return: a dma allocation pool with the requested characteristics, or
134 size_t allocation; in dma_pool_create() local
148 allocation = max_t(size_t, size, PAGE_SIZE); in dma_pool_create()
151 boundary = allocation; in dma_pool_create()
167 retval->allocation = allocation; in dma_pool_create()
216 } while (offset < pool->allocation); in pool_initialise_page()
226 page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, in pool_alloc_page()
[all …]
Dzbud.c38 * allocation function, zbud_alloc(), returns an opaque handle to the user,
41 * allocation data and unmap the handle with zbud_unmap() when operations
42 * on the allocation data are complete.
61 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
64 * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk
230 /* Converts an allocation size in bytes to size in zbud chunks */
268 * Add CHUNK_SIZE to the handle if it is the first allocation to jump in encode_handle()
304 * Return: pointer to the new zbud pool or NULL if the metadata allocation
339 * @size: size in bytes of the desired allocation
341 * @handle: handle of the new allocation
[all …]
/kernel/linux/linux-4.19/fs/ocfs2/
Dreservations.h6 * Allocation reservations function prototypes and structures.
92 * allocation mirror bitmap.
123 * @cstart: start of proposed allocation
124 * @clen: length (in clusters) of proposed allocation
131 * On success, zero is returned and the valid allocation area is set in cstart
144 * @cstart: start of allocation in clusters
145 * @clen: end of allocation in clusters.
147 * Tell the reservation code that bits were used to fulfill allocation in
151 reservations bitmap. If resv is passed, its next allocation window will be
/kernel/linux/linux-5.10/arch/x86/include/asm/
Dhw_irq.h65 * irq_alloc_info - X86 specific interrupt allocation info
66 * @type: X86 specific allocation type
67 * @flags: Flags for allocation tweaks
70 * @mask: CPU mask for vector allocation
72 * @data: Allocation specific data
74 * @ioapic: IOAPIC specific allocation data
75 * @uv: UV specific allocation data
/kernel/linux/linux-4.19/mm/
Ddmapool.c50 size_t allocation; member
56 struct dma_page { /* cacheable header for 'allocation' bytes */
98 pages * (pool->allocation / pool->size), in show_pools()
119 * Returns a dma allocation pool with the requested characteristics, or
135 size_t allocation; in dma_pool_create() local
151 allocation = max_t(size_t, size, PAGE_SIZE); in dma_pool_create()
154 boundary = allocation; in dma_pool_create()
170 retval->allocation = allocation; in dma_pool_create()
219 } while (offset < pool->allocation); in pool_initialise_page()
229 page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, in pool_alloc_page()
[all …]
Dzbud.c37 * allocation function, zbud_alloc(), returns an opaque handle to the user,
40 * allocation data and unmap the handle with zbud_unmap() when operations
41 * on the allocation data are complete.
60 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
63 * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk
228 /* Converts an allocation size in bytes to size in zbud chunks */
266 * Add CHUNK_SIZE to the handle if it is the first allocation to jump in encode_handle()
302 * Return: pointer to the new zbud pool or NULL if the metadata allocation
337 * @size: size in bytes of the desired allocation
339 * @handle: handle of the new allocation
[all …]
/kernel/linux/linux-5.10/Documentation/vm/
Dpage_frags.rst13 simple allocation framework for page fragments. This is used by the
19 cache is needed. This provides a central point for the fragment allocation
22 which can be expensive at allocation time. However due to the nature of
25 to be disabled when executing the fragment allocation.
28 allocation. The netdev_alloc_cache is used by callers making use of the
43 avoid calling get_page per allocation.
/kernel/linux/linux-4.19/Documentation/vm/
Dpage_frags.rst13 simple allocation framework for page fragments. This is used by the
19 cache is needed. This provides a central point for the fragment allocation
22 which can be expensive at allocation time. However due to the nature of
25 to be disabled when executing the fragment allocation.
28 allocation. The netdev_alloc_cache is used by callers making use of the
43 avoid calling get_page per allocation.
/kernel/linux/linux-4.19/Documentation/admin-guide/mm/
Dnuma_memory_policy.rst40 use "local allocation" described below. However, during boot
84 A VMA policy will govern the allocation of pages that back
140 support allocation at fault time--a.k.a lazy allocation--so hugetlbfs
142 Although hugetlbfs segments now support lazy allocation, their support
199 closest to the node where the allocation takes place.
202 This mode specifies that the allocation should be attempted
204 allocation fails, the kernel will search other nodes, in order
211 and the policy is interpreted as local allocation. "Local"
212 allocation policy can be viewed as a Preferred policy that
213 starts at the node containing the cpu where the allocation
[all …]
/kernel/linux/linux-5.10/Documentation/admin-guide/mm/
Dnuma_memory_policy.rst40 use "local allocation" described below. However, during boot
84 A VMA policy will govern the allocation of pages that back
140 support allocation at fault time--a.k.a lazy allocation--so hugetlbfs
142 Although hugetlbfs segments now support lazy allocation, their support
199 closest to the node where the allocation takes place.
202 This mode specifies that the allocation should be attempted
204 allocation fails, the kernel will search other nodes, in order
211 and the policy is interpreted as local allocation. "Local"
212 allocation policy can be viewed as a Preferred policy that
213 starts at the node containing the cpu where the allocation
[all …]

12345678910>>...167