// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <trace/events/cma.h>

#include "cma.h"

#undef CREATE_TRACE_POINTS
#ifndef __GENKSYMS__
#include <trace/hooks/mm.h>
#endif

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}
EXPORT_SYMBOL_GPL(cma_get_name);

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bit units.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
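
/*
 * Worked example (illustrative only, the values are hypothetical): for an
 * area with base_pfn = 0x10000 and order_per_bit = 2, one bitmap bit covers
 * 1 << 2 = 4 pages.  For a request with align_order = 4:
 *
 *	cma_bitmap_aligned_mask()          -> (1UL << (4 - 2)) - 1 = 3
 *	cma_bitmap_aligned_offset()        -> (0x10000 & 0xf) >> 2 = 0
 *	cma_bitmap_pages_to_bits(cma, 10)  -> ALIGN(10, 4) >> 2    = 3 bits
 *
 * i.e. a 10-page request consumes 3 bitmap bits, and the start bit chosen by
 * cma_alloc() must satisfy ((bit + offset) & mask) == 0 so that the resulting
 * PFN has the requested alignment.
 */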

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned long count)
{
	unsigned long bitmap_no, bitmap_count;
	unsigned long flags;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	spin_lock_irqsave(&cma->lock, flags);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	spin_unlock_irqrestore(&cma->lock, flags);
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	/*
	 * alloc_contig_range() requires the pfn range specified to be in the
	 * same zone. Simplify by forcing the entire CMA resv range to be in the
	 * same zone.
	 */
	WARN_ON_ONCE(!pfn_valid(base_pfn));
	zone = page_zone(pfn_to_page(base_pfn));
	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
		WARN_ON_ONCE(!pfn_valid(pfn));
		if (page_zone(pfn_to_page(pfn)) != zone)
			goto not_in_zone;
	}

	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

	spin_lock_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	/* Expose all pages to the buddy, they are useless for CMA. */
	for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
		free_reserved_page(pfn_to_page(pfn));
	totalcma_pages -= cma->count;
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit in the bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
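
/*
 * Usage sketch (not part of this file; the names are hypothetical): early
 * arch code that has already carved out a memblock reservation can hand it
 * to CMA like this, provided @base and @size meet the alignment checks above:
 *
 *	static struct cma *example_cma;
 *
 *	static int __init example_cma_setup(phys_addr_t base, phys_addr_t size)
 *	{
 *		int ret;
 *
 *		ret = memblock_reserve(base, size);
 *		if (ret)
 *			return ret;
 *		return cma_init_reserved_mem(base, size, 0, "example",
 *					     &example_cma);
 *	}
 *
 * The area only becomes usable after cma_init_reserved_areas() activates it
 * at core_initcall time.
 */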

/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit in the bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy algorithm.
	 * In that case we could not get contiguous memory, which is not what
	 * we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		/*
		 * If there is enough memory, try a bottom-up allocation first.
		 * It will place the new cma area close to the start of the node
		 * and guarantee that the compaction is moving pages out of the
		 * cma area and not into it.
		 * Avoid using first 4GB to not interfere with constrained zones
		 * like DMA/DMA32.
		 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
			memblock_set_bottom_up(true);
			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
							limit, nid, true);
			memblock_set_bottom_up(false);
		}
#endif

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
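
/*
 * Usage sketch (illustrative, not part of this file): arch setup code that
 * wants a 64 MiB area anywhere below the default limit could do something
 * like the following; the "example" name and size are made up:
 *
 *	static struct cma *example_cma;
 *
 *	static void __init example_reserve(void)
 *	{
 *		int ret;
 *
 *		ret = cma_declare_contiguous_nid(0, SZ_64M, 0, 0, 0, false,
 *						 "example", &example_cma,
 *						 NUMA_NO_NODE);
 *		if (ret)
 *			pr_warn("example: CMA reservation failed: %d\n", ret);
 *	}
 *
 * This must run while memblock is still the active allocator, i.e. before
 * the buddy allocator takes over the freed memory.
 */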

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	spin_lock_irq(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	spin_unlock_irq(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates contiguous pages from the specified contiguous
 * memory area.
 */
struct page *cma_alloc(struct cma *cma, unsigned long count,
		       unsigned int align, bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	unsigned long i;
	struct page *page = NULL;
	int ret = -ENOMEM;
	int num_attempts = 0;
	int max_retries = 5;
	bool bypass = false;

	trace_android_vh_cma_alloc_bypass(cma, count, align, no_warn,
					  &page, &bypass);
	if (bypass)
		return page;

	if (!cma || !cma->count || !cma->bitmap)
		goto out;

	pr_debug("%s(cma %p, count %lu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		goto out;

	trace_cma_alloc_start(cma->name, count, align);

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		goto out;

	trace_android_vh_cma_alloc_retry(cma->name, &max_retries);
	for (;;) {
		spin_lock_irq(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			if ((num_attempts < max_retries) && (ret == -EBUSY)) {
				spin_unlock_irq(&cma->lock);

				if (fatal_signal_pending(current)) {
					ret = -EINTR;
					break;
				}

				/*
				 * Page may be momentarily pinned by some other
				 * process which has been scheduled out, e.g.
				 * in exit path, during unmap call, or process
				 * fork and so cannot be freed there. Sleep
				 * for 100ms and retry the allocation.
				 */
				start = 0;
				ret = -ENOMEM;
				schedule_timeout_killable(msecs_to_jiffies(100));
				num_attempts++;
				continue;
			} else {
				spin_unlock_irq(&cma->lock);
				break;
			}
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		spin_unlock_irq(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));

		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc_finish(cma->name, pfn, page, count, align);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(nth_page(page, i));
	}

	if (ret && !no_warn) {
		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
				   __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
out:
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		if (cma)
			cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}
EXPORT_SYMBOL_GPL(cma_alloc);
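
/*
 * Usage sketch (illustrative only; "example_cma" is a hypothetical area set
 * up elsewhere): allocating and releasing a 1 MiB, 2 MiB-aligned buffer with
 * 4 KiB pages looks like this:
 *
 *	struct page *page;
 *
 *	page = cma_alloc(example_cma, SZ_1M / PAGE_SIZE,
 *			 get_order(SZ_2M), false);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	cma_release(example_cma, page, SZ_1M / PAGE_SIZE);
 *
 * Note that @align is a page order, not a byte count, and that cma_alloc()
 * may sleep, so it must not be called from atomic context.
 */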

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages,
		 unsigned long count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(cma->name, pfn, pages, count);

	return true;
}
EXPORT_SYMBOL_GPL(cma_release);

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cma_for_each_area);
