/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>	/* declares kmemleak_ignore_phys() used below */
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is in order_per_bit units.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

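/*
 * Worked example for the three bitmap helpers above (numbers are
 * illustrative only): assume cma->order_per_bit == 2, so one bitmap bit
 * covers 4 pages.
 *
 *   cma_bitmap_aligned_mask(cma, 4)
 *	-> (1UL << (4 - 2)) - 1 = 3: an allocation aligned to 2^4 pages
 *	   must start on a 4-bit boundary in the bitmap.
 *   cma_bitmap_aligned_offset(cma, 4), with cma->base_pfn == 0x12345
 *	-> (0x12345 & 0xf) >> 2 = 1: the base PFN sits one bit past an
 *	   aligned boundary, which the bitmap search must compensate for.
 *   cma_bitmap_pages_to_bits(cma, 10)
 *	-> ALIGN(10, 4) >> 2 = 3 bits to cover a 10-page request.
 */
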
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range() requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA reserved range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit in the bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

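/*
 * Usage sketch (hypothetical caller, e.g. code backing a reserved-memory
 * node): the region must already be reserved in memblock and meet the
 * alignment checks above; all names here are illustrative only.
 *
 *	static struct cma *my_cma;
 *
 *	static int __init my_cma_from_reserved(phys_addr_t base,
 *					       phys_addr_t size)
 *	{
 *		return cma_init_reserved_mem(base, size, 0, &my_cma);
 *	}
 */
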
/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit in the bitmap.
 * @fixed: hint about where to place the reserved area.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

#ifdef CONFIG_X86
	/*
	 * high_memory isn't direct mapped memory, so retrieving its physical
	 * address isn't appropriate.  But it would be useful to check the
	 * physical address of the highmem boundary, so it's justifiable to
	 * get the physical address from it.  On x86 there is a validation
	 * check for this case, so the following workaround is needed to
	 * avoid it.
	 */
	highmem_start = __pa_nodebug(high_memory);
#else
	highmem_start = __pa(high_memory);
#endif
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy
	 * algorithm.  In that case, a contiguous allocation could fail,
	 * which is not what we want.
	 */
	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped and accessible.
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

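/*
 * Usage sketch (hypothetical arch setup code; names and sizes are
 * illustrative): reserve a 16 MiB CMA area anywhere in memory during
 * early boot, once memblock is available.
 *
 *	static struct cma *dma_cma;
 *
 *	void __init my_arch_reserve_cma(void)
 *	{
 *		int ret = cma_declare_contiguous(0, SZ_16M, 0, 0, 0,
 *						 false, &dma_cma);
 *		if (ret)
 *			pr_warn("CMA: reservation failed: %d\n", ret);
 *	}
 */
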
/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates part of the contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

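/*
 * Usage sketch (hypothetical caller; "my_cma" is an illustrative region
 * pointer obtained from cma_declare_contiguous() or
 * cma_init_reserved_mem() above):
 *
 *	struct page *page = cma_alloc(my_cma, 256, 8);
 *
 *	if (!page)
 *		return -ENOMEM;
 *
 * This requests 256 contiguous pages aligned to a 2^8-page boundary.
 * Pages obtained this way must eventually be returned with cma_release().
 */
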
/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation was performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the
 * contiguous area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

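/*
 * Usage sketch (hypothetical, continuing the cma_alloc() example above):
 *
 *	if (!cma_release(my_cma, page, 256))
 *		pr_warn("pages were not part of the CMA area\n");
 *
 * @count must match what was passed to cma_alloc(); the bitmap accounting
 * in cma_clear_bitmap() assumes allocations are released in full.
 */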