
Searched full:cma (Results 1 – 25 of 260) sorted by relevance


/kernel/linux/linux-5.10/mm/
cma_debug.c
3 * CMA DebugFS Interface
10 #include <linux/cma.h>
16 #include "cma.h"
36 struct cma *cma = data; in cma_used_get() local
39 mutex_lock(&cma->lock); in cma_used_get()
41 used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma)); in cma_used_get()
42 mutex_unlock(&cma->lock); in cma_used_get()
43 *val = (u64)used << cma->order_per_bit; in cma_used_get()
51 struct cma *cma = data; in cma_maxchunk_get() local
54 unsigned long bitmap_maxno = cma_bitmap_maxno(cma); in cma_maxchunk_get()
[all …]
cma.c
15 #define pr_fmt(fmt) "cma: " fmt
31 #include <linux/cma.h>
35 #include <trace/events/cma.h>
37 #include "cma.h"
39 struct cma cma_areas[MAX_CMA_AREAS];
43 phys_addr_t cma_get_base(const struct cma *cma) in cma_get_base() argument
45 return PFN_PHYS(cma->base_pfn); in cma_get_base()
48 unsigned long cma_get_size(const struct cma *cma) in cma_get_size() argument
50 return cma->count << PAGE_SHIFT; in cma_get_size()
53 const char *cma_get_name(const struct cma *cma) in cma_get_name() argument
[all …]
cma.h
7 struct cma { struct
21 extern struct cma cma_areas[MAX_CMA_AREAS]; argument
24 static inline unsigned long cma_bitmap_maxno(struct cma *cma) in cma_bitmap_maxno() argument
26 return cma->count >> cma->order_per_bit; in cma_bitmap_maxno()
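The debugfs and header snippets above share one granularity rule: the allocation bitmap tracks chunks of 2^order_per_bit pages, so cma_bitmap_maxno() is count >> order_per_bit and cma_used_get() converts set bits back into pages with used << order_per_bit. A minimal stand-alone sketch of that arithmetic, using hypothetical numbers (64 MiB area, 4 KiB pages, order_per_bit = 0) chosen only for illustration:

#include <stdio.h>

/* Hypothetical figures, picked only to illustrate the shifts used by
 * cma_bitmap_maxno() and cma_used_get() above; they are not read from a
 * real system. */
#define PAGE_SHIFT    12                 /* 4 KiB pages */
#define AREA_BYTES    (64UL << 20)       /* 64 MiB CMA area */
#define ORDER_PER_BIT 0                  /* one bitmap bit per page */

int main(void)
{
        unsigned long count = AREA_BYTES >> PAGE_SHIFT;  /* pages in the area */
        unsigned long maxno = count >> ORDER_PER_BIT;    /* bits in cma->bitmap */
        unsigned long bits_set = 1000;                   /* pretend bitmap_weight() result */
        unsigned long used = bits_set << ORDER_PER_BIT;  /* pages reported by cma_used_get() */

        printf("pages=%lu bitmap_bits=%lu used_pages=%lu\n", count, maxno, used);
        return 0;
}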
Kconfig
292 depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
308 def_bool (MEMORY_ISOLATION && COMPACTION) || CMA
516 config CMA config
524 CMA reserves a region of memory and allows only movable pages to
532 bool "CMA debug messages (DEVELOPMENT)"
533 depends on DEBUG_KERNEL && CMA
535 Turns on debug messages in CMA. This produces KERN_DEBUG
536 messages for every CMA call as well as various messages while
541 bool "CMA debugfs interface"
542 depends on CMA && DEBUG_FS
[all …]
/kernel/linux/linux-4.19/mm/
cma_debug.c
3 * CMA DebugFS Interface
10 #include <linux/cma.h>
16 #include "cma.h"
38 struct cma *cma = data; in cma_used_get() local
41 mutex_lock(&cma->lock); in cma_used_get()
43 used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma)); in cma_used_get()
44 mutex_unlock(&cma->lock); in cma_used_get()
45 *val = (u64)used << cma->order_per_bit; in cma_used_get()
53 struct cma *cma = data; in cma_maxchunk_get() local
56 unsigned long bitmap_maxno = cma_bitmap_maxno(cma); in cma_maxchunk_get()
[all …]
cma.c
19 #define pr_fmt(fmt) "cma: " fmt
35 #include <linux/cma.h>
39 #include <trace/events/cma.h>
41 #include "cma.h"
43 struct cma cma_areas[MAX_CMA_AREAS];
47 phys_addr_t cma_get_base(const struct cma *cma) in cma_get_base() argument
49 return PFN_PHYS(cma->base_pfn); in cma_get_base()
52 unsigned long cma_get_size(const struct cma *cma) in cma_get_size() argument
54 return cma->count << PAGE_SHIFT; in cma_get_size()
57 const char *cma_get_name(const struct cma *cma) in cma_get_name() argument
[all …]
cma.h
5 struct cma { struct
18 extern struct cma cma_areas[MAX_CMA_AREAS]; argument
21 static inline unsigned long cma_bitmap_maxno(struct cma *cma) in cma_bitmap_maxno() argument
23 return cma->count >> cma->order_per_bit; in cma_bitmap_maxno()
/kernel/linux/linux-5.10/include/linux/
cma.h
10 * There is always at least global CMA area and a few optional
23 struct cma;
26 extern phys_addr_t cma_get_base(const struct cma *cma);
27 extern unsigned long cma_get_size(const struct cma *cma);
28 extern const char *cma_get_name(const struct cma *cma);
33 bool fixed, const char *name, struct cma **res_cma,
38 bool fixed, const char *name, struct cma **res_cma) in cma_declare_contiguous()
46 struct cma **res_cma);
47 extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
49 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
[all …]
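These prototypes are the whole driver-facing allocation API: cma_alloc() hands back a physically contiguous run of pages from one area and cma_release() returns it. A minimal, hypothetical in-kernel sketch of a caller, written against the 5.10 prototypes shown above (the struct cma pointer is assumed to come from elsewhere, e.g. dev_get_cma_area(); the helper names are made up):

#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helpers mirroring how the ION and dma-buf CMA heaps in these
 * results call cma_alloc()/cma_release(). The final bool argument is the
 * no_warn flag, passed as false by those heaps. */
static struct page *grab_contig_buffer(struct cma *cma, size_t bytes)
{
        size_t nr_pages = PAGE_ALIGN(bytes) >> PAGE_SHIFT;
        unsigned int align = get_order(PAGE_ALIGN(bytes));

        /* Callers such as the ION heap cap align at CONFIG_CMA_ALIGNMENT. */
        return cma_alloc(cma, nr_pages, align, false);
}

static void drop_contig_buffer(struct cma *cma, struct page *pages, size_t bytes)
{
        size_t nr_pages = PAGE_ALIGN(bytes) >> PAGE_SHIFT;

        cma_release(cma, pages, nr_pages);
}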
/kernel/linux/linux-4.19/include/linux/
cma.h
9 * There is always at least global CMA area and a few optional
20 struct cma;
23 extern phys_addr_t cma_get_base(const struct cma *cma);
24 extern unsigned long cma_get_size(const struct cma *cma);
25 extern const char *cma_get_name(const struct cma *cma);
30 bool fixed, const char *name, struct cma **res_cma);
34 struct cma **res_cma);
35 extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
37 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
39 extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
dma-contiguous.h
20 * The Contiguous Memory Allocator (CMA) makes it possible to
41 * CMA tries to solve this issue by operating on memory regions
48 * CMA should not be used by the device drivers directly. It is
58 struct cma;
63 extern struct cma *dma_contiguous_default_area;
65 static inline struct cma *dev_get_cma_area(struct device *dev) in dev_get_cma_area()
72 static inline void dev_set_cma_area(struct device *dev, struct cma *cma) in dev_set_cma_area() argument
75 dev->cma_area = cma; in dev_set_cma_area()
78 static inline void dma_contiguous_set_default(struct cma *cma) in dma_contiguous_set_default() argument
80 dma_contiguous_default_area = cma; in dma_contiguous_set_default()
[all …]
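The inline helpers above are how a driver finds the area its contiguous DMA allocations will come from. A hypothetical probe-time helper, written against the 4.19 header shown here (<linux/dma-contiguous.h>; the 5.10 tree keeps equivalent helpers in a different header) and using only accessors declared in these results:

#include <linux/cma.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/printk.h>

/* Hypothetical helper: report which CMA area backs a device, falling back
 * to the global default area when dev is NULL (the same convention
 * cma_in_zone() above relies on with dev_get_cma_area(NULL)). */
static void report_cma_area(struct device *dev)
{
        struct cma *cma = dev_get_cma_area(dev);
        phys_addr_t base;

        if (!cma) {
                pr_info("cma: no area attached\n");
                return;
        }

        base = cma_get_base(cma);
        pr_info("cma: area '%s' at %pa, %lu bytes\n",
                cma_get_name(cma), &base, cma_get_size(cma));
}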
/kernel/linux/linux-5.10/Documentation/admin-guide/mm/
cma_debugfs.rst
2 CMA Debugfs Interface
5 The CMA debugfs interface is useful to retrieve basic information out of the
6 different CMA areas and to test allocation/release in each of the areas.
8 Each CMA zone represents a directory under <debugfs>/cma/, indexed by the
9 kernel's CMA index. So the first CMA zone would be:
11 <debugfs>/cma/cma-0
16 - [RO] count: Amount of memory in the CMA area.
19 - [WO] alloc: Allocate N pages from that CMA area. For example::
21 echo 5 > <debugfs>/cma/cma-2/alloc
23 would try to allocate 5 pages from the cma-2 area.
[all …]
/kernel/linux/linux-4.19/Documentation/cma/
debugfs.txt
1 The CMA debugfs interface is useful to retrieve basic information out of the
2 different CMA areas and to test allocation/release in each of the areas.
4 Each CMA zone represents a directory under <debugfs>/cma/, indexed by the
5 kernel's CMA index. So the first CMA zone would be:
7 <debugfs>/cma/cma-0
12 - [RO] count: Amount of memory in the CMA area.
15 - [WO] alloc: Allocate N pages from that CMA area. For example:
17 echo 5 > <debugfs>/cma/cma-2/alloc
19 would try to allocate 5 pages from the cma-2 area.
21 - [WO] free: Free N pages from that CMA area, similar to the above.
/kernel/linux/linux-5.10/kernel/dma/
contiguous.c
11 * The Contiguous Memory Allocator (CMA) makes it possible to
32 * CMA tries to solve this issue by operating on memory regions
38 #define pr_fmt(fmt) "cma: " fmt
52 #include <linux/cma.h>
60 struct cma *dma_contiguous_default_area;
63 * Default global CMA area size can be defined in kernel's .config.
69 * Users, who want to set the size of global CMA area for their system
70 * should use cma= kernel parameter.
97 early_param("cma", early_cma);
101 static struct cma *dma_contiguous_pernuma_area[MAX_NUMNODES];
[all …]
Kconfig
112 depends on HAVE_DMA_CONTIGUOUS && CMA
118 You can disable CMA by specifying "cma=0" on the kernel's command
130 Enable this option to get pernuma CMA areas so that devices like
133 You can set the size of pernuma CMA by specifying "cma_pernuma=size"
145 Memory Allocator. If the size of 0 is selected, CMA is disabled by
146 default, but it can be enabled by passing cma=size[MG] to the kernel.
157 If 0 percent is selected, CMA is disabled by default, but it can be
158 enabled by passing cma=size[MG] to the kernel.
pool.c
6 #include <linux/cma.h>
63 struct cma *cma; in cma_in_zone() local
65 cma = dev_get_cma_area(NULL); in cma_in_zone()
66 if (!cma) in cma_in_zone()
69 size = cma_get_size(cma); in cma_in_zone()
73 /* CMA can't cross zone boundaries, see cma_activate_area() */ in cma_in_zone()
74 end = cma_get_base(cma) + size - 1; in cma_in_zone()
/kernel/linux/linux-5.10/drivers/staging/android/ion/
ion_cma_heap.c
3 * ION Memory Allocator CMA heap exporter
13 #include <linux/cma.h>
21 struct cma *cma; member
26 /* ION CMA heap operations functions */
42 pages = cma_alloc(cma_heap->cma, nr_pages, align, false); in ion_cma_allocate()
79 cma_release(cma_heap->cma, pages, nr_pages); in ion_cma_allocate()
90 cma_release(cma_heap->cma, pages, nr_pages); in ion_cma_free()
104 static struct ion_heap *__ion_cma_heap_create(struct cma *cma) in __ion_cma_heap_create() argument
114 cma_heap->cma = cma; in __ion_cma_heap_create()
119 static int __ion_add_cma_heaps(struct cma *cma, void *data) in __ion_add_cma_heaps() argument
[all …]
/kernel/linux/linux-4.19/drivers/staging/android/ion/
ion_cma_heap.c
13 #include <linux/cma.h>
21 struct cma *cma; member
26 /* ION CMA heap operations functions */
42 pages = cma_alloc(cma_heap->cma, nr_pages, align, false); in ion_cma_allocate()
79 cma_release(cma_heap->cma, pages, nr_pages); in ion_cma_allocate()
90 cma_release(cma_heap->cma, pages, nr_pages); in ion_cma_free()
104 static struct ion_heap *__ion_cma_heap_create(struct cma *cma) in __ion_cma_heap_create() argument
116 * used to make the link with reserved CMA memory in __ion_cma_heap_create()
118 cma_heap->cma = cma; in __ion_cma_heap_create()
123 static int __ion_add_cma_heaps(struct cma *cma, void *data) in __ion_add_cma_heaps() argument
[all …]
/kernel/linux/linux-5.10/drivers/dma-buf/heaps/
cma_heap.c
3 * DMABUF CMA heap exporter
9 #include <linux/cma.h>
26 struct cma *cma; member
38 cma_release(cma_heap->cma, cma_pages, nr_pages); in cma_heap_free()
42 /* dmabuf heap CMA operations functions */
69 cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false); in cma_heap_allocate()
130 cma_release(cma_heap->cma, cma_pages, nr_pages); in cma_heap_allocate()
140 static int __add_cma_heap(struct cma *cma, void *data) in __add_cma_heap() argument
148 cma_heap->cma = cma; in __add_cma_heap()
150 exp_info.name = cma_get_name(cma); in __add_cma_heap()
[all …]
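The __add_cma_heap() and __ion_add_cma_heaps() callbacks above have exactly the (struct cma *, void *) shape that cma_for_each_area(), declared in include/linux/cma.h in these results, expects. A hypothetical callback of the same shape, just to show the iteration contract; the names are made up:

#include <linux/cma.h>
#include <linux/printk.h>

/* Hypothetical iterator callback: cma_for_each_area() invokes it once per
 * registered CMA area; returning non-zero stops the walk early. */
static int log_one_area(struct cma *cma, void *data)
{
        unsigned long *total = data;

        *total += cma_get_size(cma);
        pr_info("cma: area '%s', %lu bytes\n",
                cma_get_name(cma), cma_get_size(cma));
        return 0;
}

static void log_all_areas(void)
{
        unsigned long total = 0;

        cma_for_each_area(log_one_area, &total);
        pr_info("cma: %lu bytes reserved in total\n", total);
}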
Kconfig
9 bool "DMA-BUF CMA Heap"
12 Choose this option to enable dma-buf CMA heap. This heap is backed
13 by the Contiguous Memory Allocator (CMA). If your system has these
/kernel/linux/linux-4.19/kernel/dma/
contiguous.c
10 #define pr_fmt(fmt) "cma: " fmt
25 #include <linux/cma.h>
33 struct cma *dma_contiguous_default_area;
36 * Default global CMA area size can be defined in kernel's .config.
42 * Users, who want to set the size of global CMA area for their system
43 * should use cma= kernel parameter.
69 early_param("cma", early_cma);
150 * @res_cma: Pointer to store the created cma region.
163 phys_addr_t limit, struct cma **res_cma, in dma_contiguous_reserve_area()
250 struct cma *cma; in rmem_cma_setup() local
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/
drm_gem_cma_helper.c
3 * drm gem CMA (contiguous memory allocator) helper functions
26 * DOC: cma helpers
31 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
37 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
41 * This function creates and initializes a GEM CMA object of the given size,
85 * This function creates a CMA GEM object and allocates a contiguous chunk of
130 * This function creates a CMA GEM object, allocating a physically contiguous
167 * drm_gem_cma_free_object - free resources associated with a CMA GEM object
170 * This function frees the backing memory of the CMA GEM object, cleans up the
173 * Drivers using the CMA helpers should set this as their
[all …]
drm_fb_cma_helper.c
3 * drm kms/fb cma (contiguous memory allocator) helper functions
21 * DOC: framebuffer cma helper functions
23 * Provides helper functions for creating a cma (contiguous memory allocator)
27 * callback function to create a cma backed framebuffer.
31 * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
35 * Return the CMA GEM object for given framebuffer.
59 * Return the CMA GEM address for given framebuffer.
/kernel/linux/linux-4.19/drivers/gpu/drm/
drm_gem_cma_helper.c
2 * drm gem CMA (contiguous memory allocator) helper functions
33 * DOC: cma helpers
38 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
44 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
48 * This function creates and initializes a GEM CMA object of the given size,
92 * This function creates a CMA GEM object and allocates a contiguous chunk of
137 * This function creates a CMA GEM object, allocating a physically contiguous
174 * drm_gem_cma_free_object - free resources associated with a CMA GEM object
177 * This function frees the backing memory of the CMA GEM object, cleans up the
179 * Drivers using the CMA helpers should set this as their
[all …]
drm_fb_cma_helper.c
2 * drm kms/fb cma (contiguous memory allocator) helper functions
35 * DOC: framebuffer cma helper functions
37 * Provides helper functions for creating a cma (contiguous memory allocator)
41 * callback function to create a cma backed framebuffer.
43 * An fbdev framebuffer backed by cma is also available by calling
53 * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
57 * Return the CMA GEM object for given framebuffer.
79 * Return the CMA GEM address for given framebuffer.
/kernel/linux/linux-5.10/include/drm/
drm_gem_cma_helper.h
12 * struct drm_gem_cma_object - GEM object backed by CMA memory allocations
25 /* For objects with DMA memory allocated by GEM CMA */
40 * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers
43 * This macro autogenerates a suitable &struct file_operations for CMA based
113 * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations
134 * DRM_GEM_CMA_DRIVER_OPS - CMA GEM driver operations
149 * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE - CMA GEM driver operations
173 * DRM_GEM_CMA_DRIVER_OPS_VMAP - CMA GEM driver operations ensuring a virtual
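The macros listed above are meant to be dropped straight into a driver's file_operations and struct drm_driver. A hypothetical skeleton showing where DEFINE_DRM_GEM_CMA_FOPS() and DRM_GEM_CMA_DRIVER_OPS plug in, written as a sketch against the 5.10 header above with all driver names made up; it is not a complete driver:

#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>

/* Generates a suitable struct file_operations for a CMA-backed driver. */
DEFINE_DRM_GEM_CMA_FOPS(example_fops);

/* Hypothetical driver skeleton; DRM_GEM_CMA_DRIVER_OPS fills in the
 * GEM/CMA callbacks described in the header above. */
static struct drm_driver example_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET,
        .fops            = &example_fops,
        DRM_GEM_CMA_DRIVER_OPS,
        .name            = "example-cma",
        .desc            = "hypothetical CMA-backed DRM driver",
        .date            = "20210101",
        .major           = 1,
        .minor           = 0,
};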
