// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <trace/events/cma.h>

#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>

#include "cma.h"

extern void lru_cache_disable(void);
extern void lru_cache_enable(void);

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);
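
/*
 * Note: cma_mutex (above) only serializes the alloc_contig_range() calls
 * issued from cma_alloc() across all CMA areas; updates to each area's
 * allocation bitmap are protected by the per-area cma->lock instead.
 */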

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}
EXPORT_SYMBOL_GPL(cma_get_name);

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}
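
/*
 * Illustrative example (not from the original source): with
 * order_per_bit = 0 and align_order = 9 (a 2 MiB alignment with 4 KiB
 * pages), the mask is (1UL << 9) - 1 = 511, which the bitmap search below
 * uses to round candidate positions up to a multiple of 512 bits, i.e. a
 * 512-page boundary. When align_order <= order_per_bit every bit already
 * satisfies the requested alignment, hence the mask of 0.
 */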

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}
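
/*
 * Illustrative example (assumed values): with order_per_bit = 0,
 * base_pfn = 0x2f500 and align_order = 10, the offset is
 * (0x2f500 & 0x3ff) >> 0 = 0x100, i.e. bit 0 of the bitmap is 0x100 pages
 * past the previous 1024-page boundary. The bitmap search aligns
 * (bit index + offset) rather than the raw bit index, so the resulting
 * PFN is aligned even though the area's base is not.
 */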

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
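
/*
 * Illustrative example: with order_per_bit = 1 each bitmap bit covers two
 * pages, so a request for 5 pages is rounded up to ALIGN(5, 2) = 6 pages
 * and consumes 6 >> 1 = 3 bits of the bitmap.
 */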
92 
cma_clear_bitmap(struct cma * cma,unsigned long pfn,unsigned int count)93 static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
94 			     unsigned int count)
95 {
96 	unsigned long bitmap_no, bitmap_count;
97 
98 	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
99 	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
100 
101 	mutex_lock(&cma->lock);
102 	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
103 	mutex_unlock(&cma->lock);
104 }
105 
cma_activate_area(struct cma * cma)106 static void __init cma_activate_area(struct cma *cma)
107 {
108 	unsigned long base_pfn = cma->base_pfn, pfn;
109 	struct zone *zone;
110 
111 	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
112 	if (!cma->bitmap)
113 		goto out_error;
114 
115 	/*
116 	 * alloc_contig_range() requires the pfn range specified to be in the
117 	 * same zone. Simplify by forcing the entire CMA resv range to be in the
118 	 * same zone.
119 	 */
120 	WARN_ON_ONCE(!pfn_valid(base_pfn));
121 	zone = page_zone(pfn_to_page(base_pfn));
122 	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
123 		WARN_ON_ONCE(!pfn_valid(pfn));
124 		if (page_zone(pfn_to_page(pfn)) != zone)
125 			goto not_in_zone;
126 	}
127 
128 	for (pfn = base_pfn; pfn < base_pfn + cma->count;
129 	     pfn += pageblock_nr_pages)
130 		init_cma_reserved_pageblock(pfn_to_page(pfn));
131 
132 	mutex_init(&cma->lock);
133 
134 #ifdef CONFIG_CMA_DEBUGFS
135 	INIT_HLIST_HEAD(&cma->mem_head);
136 	spin_lock_init(&cma->mem_head_lock);
137 #endif
138 
139 	return;
140 
141 not_in_zone:
142 	bitmap_free(cma->bitmap);
143 out_error:
144 	/* Expose all pages to the buddy, they are useless for CMA. */
145 	for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
146 		free_reserved_page(pfn_to_page(pfn));
147 	totalcma_pages -= cma->count;
148 	cma->count = 0;
149 	pr_err("CMA area %s could not be activated\n", cma->name);
150 	return;
151 }
152 
cma_init_reserved_areas(void)153 static int __init cma_init_reserved_areas(void)
154 {
155 	int i;
156 
157 	for (i = 0; i < cma_area_count; i++)
158 		cma_activate_area(&cma_areas[i]);
159 
160 	return 0;
161 }
162 core_initcall(cma_init_reserved_areas);
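
/*
 * Activation is deferred to a core_initcall because cma_activate_area()
 * needs the slab allocator (for bitmap_zalloc()) and fully initialised
 * struct pages, neither of which is available at the early boot stage
 * where the areas themselves are reserved from memblock.
 */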

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
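
/*
 * Minimal usage sketch (hypothetical caller, for illustration only): code
 * that has already reserved a suitably aligned region through memblock can
 * hand it over to CMA as shown below. The names my_base, my_size and
 * my_cma are placeholders, not part of this file.
 *
 *	static struct cma *my_cma;
 *
 *	static int __init my_cma_setup(phys_addr_t my_base, phys_addr_t my_size)
 *	{
 *		int ret;
 *
 *		ret = memblock_reserve(my_base, my_size);
 *		if (ret)
 *			return ret;
 *
 *		return cma_init_reserved_mem(my_base, my_size, 0, "my-area",
 *					     &my_cma);
 *	}
 */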

/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into pages of an
	 * adjacent unmovable migratetype by the page allocator's buddy
	 * algorithm. In that case you could not get contiguous memory, which
	 * is not what we want.
	 */
	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		/*
		 * If there is enough memory, try a bottom-up allocation first.
		 * It will place the new cma area close to the start of the node
		 * and guarantee that the compaction is moving pages out of the
		 * cma area and not into it.
		 * Avoid using first 4GB to not interfere with constrained zones
		 * like DMA/DMA32.
		 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
			memblock_set_bottom_up(true);
			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
							limit, nid, true);
			memblock_set_bottom_up(false);
		}
#endif

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
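
/*
 * Usage sketch (illustrative only): the generic DMA layer reserves its
 * default area in roughly this way, letting memblock pick the base address
 * and using one bit per page (order_per_bit = 0):
 *
 *	struct cma *dma_cma;
 *	int ret;
 *
 *	ret = cma_declare_contiguous_nid(0, SZ_64M, 0, 0, 0, false,
 *					 "reserved", &dma_cma, NUMA_NO_NODE);
 *	if (ret)
 *		pr_warn("cma: reservation failed: %d\n", ret);
 *
 * The exact parameters used by the DMA code differ; the call above only
 * shows the argument order of this function.
 */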

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif
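
/*
 * With CONFIG_CMA_DEBUG enabled the function above prints one line per
 * failed allocation, e.g. (made-up numbers):
 *
 *	cma: number of available pages: 8@0+16@64=> 24 free of 256 total pages
 *
 * meaning 8 free pages starting at bitmap bit 0 and 16 free pages starting
 * at bit 64, out of 256 pages in the area.
 */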

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during the cma allocation.
 *
 * This function allocates part of the contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	size_t i;
	struct page *page = NULL;
	int ret = -ENOMEM;
	int num_attempts = 0;
	int max_retries = 5;
	s64 ts;
	struct cma_alloc_info cma_info = {0};

	trace_android_vh_cma_alloc_start(&ts);

	if (!cma || !cma->count || !cma->bitmap)
		goto out;

	pr_debug("%s(cma %p, count %zu, align %d gfp_mask 0x%x)\n", __func__,
			(void *)cma, count, align, gfp_mask);

	if (!count)
		goto out;

	trace_cma_alloc_start(cma->name, count, align);

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		goto out;

	lru_cache_disable();
	for (;;) {
		struct acr_info info = {0};

		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			if ((num_attempts < max_retries) && (ret == -EBUSY)) {
				mutex_unlock(&cma->lock);

				if (fatal_signal_pending(current) ||
				    (gfp_mask & __GFP_NORETRY))
					break;

				/*
				 * Page may be momentarily pinned by some other
				 * process which has been scheduled out, e.g.
				 * in exit path, during unmap call, or process
				 * fork and so cannot be freed there. Sleep
				 * for 100ms and retry the allocation.
				 */
				start = 0;
				ret = -ENOMEM;
				schedule_timeout_killable(msecs_to_jiffies(100));
				num_attempts++;
				continue;
			} else {
				mutex_unlock(&cma->lock);
				break;
			}
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp_mask, &info);
		mutex_unlock(&cma_mutex);
		cma_info.nr_migrated += info.nr_migrated;
		cma_info.nr_reclaimed += info.nr_reclaimed;
		cma_info.nr_mapped += info.nr_mapped;
		if (info.err) {
			if (info.err & ACR_ERR_ISOLATE)
				cma_info.nr_isolate_fail++;
			if (info.err & ACR_ERR_MIGRATE)
				cma_info.nr_migrate_fail++;
			if (info.err & ACR_ERR_TEST)
				cma_info.nr_test_fail++;
		}
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));

		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);

		if (info.failed_pfn && gfp_mask & __GFP_NORETRY) {
			/* try again from following failed page */
			start = (pfn_max_align_up(info.failed_pfn + 1) -
				 cma->base_pfn) >> cma->order_per_bit;

		} else {
			/* try again with a bit different memory target */
			start = bitmap_no + mask + 1;
		}
	}

	lru_cache_enable();
	trace_cma_alloc_finish(cma->name, pfn, page, count, align);
	trace_cma_alloc_info(cma->name, page, count, align, &cma_info);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(nth_page(page, i));
	}

	if (ret && !(gfp_mask & __GFP_NOWARN)) {
		pr_err("%s: %s: alloc failed, req-size: %zu pages, ret: %d\n",
		       __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
out:
	trace_android_vh_cma_alloc_finish(cma, page, count, align, gfp_mask, ts);
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		if (cma)
			cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}
EXPORT_SYMBOL_GPL(cma_alloc);
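
/*
 * Usage sketch (illustrative, assuming a previously initialised area in a
 * placeholder variable my_cma): allocate 256 pages (1 MiB with 4 KiB pages)
 * and free them again.
 *
 *	struct page *page;
 *
 *	page = cma_alloc(my_cma, 256, 0, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	cma_release(my_cma, page, 256);
 *
 * @align is an order, so passing 4 instead of 0 would request a start
 * address aligned to 16 pages.
 */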

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p, count %u)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(cma->name, pfn, pages, count);

	return true;
}
EXPORT_SYMBOL_GPL(cma_release);

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cma_for_each_area);
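
/*
 * Illustrative iterator callback (hypothetical, not part of this file):
 * returning a non-zero value from the callback stops the iteration and is
 * propagated back to the caller.
 *
 *	static int cma_print_one(struct cma *cma, void *data)
 *	{
 *		pr_info("%s: %lu pages at PFN %lu\n", cma_get_name(cma),
 *			cma->count, cma->base_pfn);
 *		return 0;
 *	}
 *
 *	cma_for_each_area(cma_print_one, NULL);
 */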