// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}
EXPORT_SYMBOL_GPL(cma_get_name);

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}
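
/*
 * Worked example (illustrative values, not taken from the kernel sources):
 * with base_pfn == 0x2f832, align_order == 9 and order_per_bit == 1, the
 * low nine bits of the base PFN are 0x32, so the offset returned above is
 * 0x32 >> 1 == 0x19 bitmap bits.
 */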

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on the bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d\n", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
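
/*
 * Example (a minimal sketch, not part of the kernel sources; the memblock
 * reservation and the "my_cma" pointer are hypothetical): early boot code
 * that has already reserved a suitably aligned region with memblock can
 * hand it over to CMA like this:
 *
 *	static struct cma *my_cma;
 *
 *	static int __init my_cma_setup(phys_addr_t base, phys_addr_t size)
 *	{
 *		return cma_init_reserved_mem(base, size, 0, "my_cma",
 *					     &my_cma);
 *	}
 */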

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on the bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy algorithm.
	 * In that case you couldn't get contiguous memory, which is not what
	 * we want.
	 */
	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_phys_alloc_range(size, alignment,
							 highmem_start, limit);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_phys_alloc_range(size, alignment, base,
							 limit);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
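
/*
 * Example (a minimal sketch, not part of the kernel sources; the 64 MiB
 * size and the "camera" name are illustrative): architecture setup code
 * typically calls this once, after memblock is up, e.g.:
 *
 *	static struct cma *camera_cma;
 *
 *	void __init my_arch_reserve_cma(void)
 *	{
 *		if (cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
 *					   "camera", &camera_cma))
 *			pr_warn("camera CMA reservation failed\n");
 *	}
 */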

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates part of the contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	size_t i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(nth_page(page, i));
	}

	if (ret && !no_warn) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
EXPORT_SYMBOL_GPL(cma_alloc);
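
/*
 * Example (a minimal sketch, not part of the kernel sources; "my_cma" and
 * the page count are hypothetical): a driver that owns a CMA area can pair
 * cma_alloc() with cma_release() like this:
 *
 *	unsigned int nr_pages = 256;
 *	struct page *page = cma_alloc(my_cma, nr_pages, 0, false);
 *
 *	if (page) {
 *		// use the physically contiguous range starting at page
 *		cma_release(my_cma, page, nr_pages);
 *	}
 */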

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
EXPORT_SYMBOL_GPL(cma_release);

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cma_for_each_area);
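
/*
 * Example (a minimal sketch, not part of the kernel sources; the callback
 * name is hypothetical): every registered area can be walked with a
 * callback, e.g. to log the regions:
 *
 *	static int print_one_cma(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("%s: base %pa, size %lu bytes\n", cma_get_name(cma),
 *			&base, cma_get_size(cma));
 *		return 0;	// a non-zero return stops the iteration
 *	}
 *
 *	cma_for_each_area(print_one_cma, NULL);
 */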