/*
 * drivers/staging/android/ion/ion_carveout_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

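/*
 * A carveout heap serves buffers from a physically contiguous region
 * reserved at boot, tracked with a genalloc pool starting at @base.
 */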
struct ion_carveout_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
};

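/*
 * Allocate @size bytes of physically contiguous memory from the heap's
 * pool.  Returns the physical address, or ION_CARVEOUT_ALLOCATE_FAIL
 * when the pool is exhausted (gen_pool_alloc() returns 0 on failure).
 * @align is accepted but unused: the pool already hands out
 * page-aligned blocks.
 */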
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);
	unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);

	if (!offset)
		return ION_CARVEOUT_ALLOCATE_FAIL;

	return offset;
}

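/*
 * Return a range obtained from ion_carveout_allocate() to the pool.
 * Failed allocations are ignored so callers may free unconditionally.
 */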
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
		return;
	gen_pool_free(carveout_heap->pool, addr, size);
}

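/*
 * Report the physical address and size of @buffer.  The allocation is
 * contiguous, so the single scatterlist entry describes it all.
 */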
static int ion_carveout_heap_phys(struct ion_heap *heap,
				  struct ion_buffer *buffer,
				  ion_phys_addr_t *addr, size_t *len)
{
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);
	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

	*addr = paddr;
	*len = buffer->size;
	return 0;
}

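/*
 * Allocate a buffer by carving @size bytes out of the pool and
 * wrapping the range in a single-entry sg_table kept in
 * buffer->priv_virt.  Alignments above PAGE_SIZE are rejected since
 * the pool only guarantees page alignment.
 */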
static int ion_carveout_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	struct sg_table *table;
	ion_phys_addr_t paddr;
	int ret;

	if (align > PAGE_SIZE)
		return -EINVAL;

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto err_free;

	paddr = ion_carveout_allocate(heap, size, align);
	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
		ret = -ENOMEM;
		goto err_free_table;
	}

	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
	buffer->priv_virt = table;

	return 0;

err_free_table:
	sg_free_table(table);
err_free:
	kfree(table);
	return ret;
}

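/*
 * Free a buffer: zero it so stale contents cannot leak to the next
 * owner, flush the zeroes past the CPU caches if the buffer was
 * cached, then return the range to the pool and drop the sg_table.
 */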
static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);
	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

	ion_heap_buffer_zero(buffer);

	if (ion_buffer_cached(buffer))
		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
				       DMA_BIDIRECTIONAL);

	ion_carveout_free(heap, paddr, buffer->size);
	sg_free_table(table);
	kfree(table);
}

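/*
 * The single-entry sg_table built at allocation time already
 * describes the buffer, so mapping for DMA just returns it.
 */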
static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
						  struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

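/* No state was created by map_dma, so there is nothing to undo. */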
static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
}

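/*
 * map_kernel, unmap_kernel and map_user use the generic ion_heap
 * helpers, which work from the sg_table that map_dma returns.
 */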
static struct ion_heap_ops carveout_heap_ops = {
	.allocate = ion_carveout_heap_allocate,
	.free = ion_carveout_heap_free,
	.phys = ion_carveout_heap_phys,
	.map_dma = ion_carveout_heap_map_dma,
	.unmap_dma = ion_carveout_heap_unmap_dma,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

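/*
 * Create a carveout heap over the physically contiguous region
 * described by @heap_data.  The region is synced for the device and
 * zeroed up front, then managed through a page-granular genalloc
 * pool.  Frees through this heap are deferred to the heap's free
 * thread (ION_HEAP_FLAG_DEFER_FREE).
 *
 * Illustrative only: a platform might describe its reserved region
 * with something like the following (addresses hypothetical):
 *
 *	static struct ion_platform_heap heap_data = {
 *		.type = ION_HEAP_TYPE_CARVEOUT,
 *		.name = "carveout",
 *		.base = 0x80000000,
 *		.size = SZ_16M,
 *	};
 *	heap = ion_carveout_heap_create(&heap_data);
 */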
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_carveout_heap *carveout_heap;
	struct page *page;
	size_t size;
	int ret;

	page = pfn_to_page(PFN_DOWN(heap_data->base));
	size = heap_data->size;

	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);

	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
	if (ret)
		return ERR_PTR(ret);

	carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
	if (!carveout_heap)
		return ERR_PTR(-ENOMEM);

	carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!carveout_heap->pool) {
		kfree(carveout_heap);
		return ERR_PTR(-ENOMEM);
	}
	carveout_heap->base = heap_data->base;
	gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
		     -1);
	carveout_heap->heap.ops = &carveout_heap_ops;
	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
	carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

	return &carveout_heap->heap;
}

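/* Tear down a heap created by ion_carveout_heap_create(). */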
void ion_carveout_heap_destroy(struct ion_heap *heap)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	gen_pool_destroy(carveout_heap->pool);
	kfree(carveout_heap);
}