// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}
EXPORT_SYMBOL_GPL(cma_get_name);

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}
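
/*
 * Worked example (hypothetical values, for illustration only): with
 * order_per_bit == 0 and align_order == 2, the mask is
 * (1UL << 2) - 1 == 0b11. Combined with the offset computed below, this
 * forces the bitmap search to land only on bit indices that correspond
 * to 4-page (order-2) aligned PFNs.
 */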

/*
 * Find the offset of the base PFN from the specified align_order.
 * The result is expressed in bitmap-bit units, i.e. each unit covers
 * (1 << order_per_bit) pages.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}
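
/*
 * Worked example (hypothetical values): with order_per_bit == 0,
 * base_pfn == 0x2f800 and align_order == 10, the low 10 bits of
 * base_pfn are zero, so the offset is 0. With base_pfn == 0x2fc00 and
 * align_order == 11, the low 11 bits are 0x400, so aligned searches
 * must be offset by 0x400 bits into the bitmap.
 */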

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
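
/*
 * Worked example (hypothetical values): with order_per_bit == 2, each
 * bit covers 4 pages, so a request for 5 pages is rounded up to
 * ALIGN(5, 4) == 8 pages and consumes 8 >> 2 == 2 bits in the bitmap.
 */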

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap) {
		cma->count = 0;
		return -ENOMEM;
	}

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range() requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA reserved
			 * range to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

not_in_zone:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on the bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like the slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
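
/*
 * Illustrative sketch (not part of this file): early boot code that has
 * already reserved a block via memblock might hand it to CMA roughly
 * like this; "rmem" and "my_cma" are hypothetical names.
 *
 *	struct cma *my_cma;
 *	int err;
 *
 *	err = cma_init_reserved_mem(rmem->base, rmem->size, 0,
 *				    "my-region", &my_cma);
 *	if (err)
 *		pr_warn("my-region: CMA init failed (%d)\n", err);
 */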

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on the bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem().
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve a contiguous area at exactly @base. If false,
 * reserve in the range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the buddy allocator. In that case
	 * the allocator could not hand out contiguous memory, which is not
	 * what we want, so enforce at least pageblock/MAX_ORDER alignment.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_phys_alloc_range(size, alignment,
							 highmem_start, limit);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_phys_alloc_range(size, alignment, base,
							 limit);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped and accessible.
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
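
/*
 * Illustrative sketch (not part of this file): an arch init path might
 * reserve a 16 MiB region anywhere below 4 GiB; "dma_cma" is a
 * hypothetical name.
 *
 *	struct cma *dma_cma;
 *	int err;
 *
 *	err = cma_declare_contiguous(0, SZ_16M, DMA_BIT_MASK(32), 0,
 *				     0, false, "dma", &dma_cma);
 *	if (err)
 *		pr_warn("dma: CMA reservation failed (%d)\n", err);
 */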

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation.
 *
 * This function allocates part of a contiguous memory area. It returns the
 * first page of the allocated range on success, or NULL on failure.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	size_t i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !no_warn) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
		       __func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
EXPORT_SYMBOL_GPL(cma_alloc);
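
/*
 * Illustrative sketch (not part of this file): a driver holding a
 * struct cma pointer ("my_cma", hypothetical) could allocate and free
 * an 8-page buffer with no extra alignment like this:
 *
 *	struct page *page;
 *
 *	page = cma_alloc(my_cma, 8, 0, false);
 *	if (page) {
 *		void *vaddr = page_address(page); // lowmem regions only
 *		...
 *		cma_release(my_cma, page, 8);
 *	}
 */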

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the
 * contiguous area, and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
EXPORT_SYMBOL_GPL(cma_release);

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cma_for_each_area);
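
/*
 * Illustrative sketch (not part of this file): a caller can walk all
 * registered areas with a callback; returning non-zero stops the walk.
 * "print_one_cma" is a hypothetical helper.
 *
 *	static int print_one_cma(struct cma *cma, void *data)
 *	{
 *		pr_info("%s: %lu pages at PFN %lu\n", cma_get_name(cma),
 *			cma->count, cma->base_pfn);
 *		return 0;
 *	}
 *
 *	...
 *	cma_for_each_area(print_one_cma, NULL);
 */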