// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator - buffer interface
 *
 * Copyright (c) 2019, Google, Inc.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/dma-noncoherent.h>

#define CREATE_TRACE_POINTS
#include "ion_trace.h"
#include "ion_private.h"

static atomic_long_t total_heap_bytes;

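/*
 * Add the buffer's size to the global heap total and emit an ion_stat
 * trace event recording the new total.
 */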
static void track_buffer_created(struct ion_buffer *buffer)
{
	long total = atomic_long_add_return(buffer->size, &total_heap_bytes);

	trace_ion_stat(buffer->sg_table, buffer->size, total);
}

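/*
 * Subtract the buffer's size from the global heap total and emit an
 * ion_stat trace event recording the new total.
 */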
static void track_buffer_destroyed(struct ion_buffer *buffer)
{
	long total = atomic_long_sub_return(buffer->size, &total_heap_bytes);

	trace_ion_stat(buffer->sg_table, -buffer->size, total);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	buffer->size = len;

	ret = heap->ops->allocate(heap, buffer, len, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, flags);
		if (ret)
			goto err2;
	}

	if (!buffer->sg_table) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	spin_lock(&heap->stat_lock);
	heap->num_of_buffers++;
	heap->num_of_alloc_bytes += len;
	if (heap->num_of_alloc_bytes > heap->alloc_bytes_wm)
		heap->alloc_bytes_wm = heap->num_of_alloc_bytes;
	if (heap->num_of_buffers == 1) {
		/* This module reference lasts as long as at least one
		 * buffer is allocated from the heap. We are protected
		 * against ion_device_remove_heap() with dev->lock, so we can
		 * safely assume the module reference is going to succeed.
		 */
		__module_get(heap->owner);
	}
	spin_unlock(&heap->stat_lock);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	track_buffer_created(buffer);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

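/*
 * Map the given pages into a contiguous kernel virtual range with the
 * requested protection, zero them, then tear the mapping down again.
 */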
static int ion_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
	void *addr = vmap(pages, num, VM_MAP, pgprot);

	if (!addr)
		return -ENOMEM;
	memset(addr, 0, PAGE_SIZE * num);
	vunmap(addr);

	return 0;
}

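/*
 * Zero every page backing the scatterlist. Pages are collected into a
 * small on-stack array so each vmap() covers up to 32 pages at a time.
 */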
static int ion_sglist_zero(struct scatterlist *sgl, unsigned int nents,
			   pgprot_t pgprot)
{
	int p = 0;
	int ret = 0;
	struct sg_page_iter piter;
	struct page *pages[32];

	for_each_sg_page(sgl, &piter, nents, 0) {
		pages[p++] = sg_page_iter_page(&piter);
		if (p == ARRAY_SIZE(pages)) {
			ret = ion_clear_pages(pages, p, pgprot);
			if (ret)
				return ret;
			p = 0;
		}
	}
	if (p)
		ret = ion_clear_pages(pages, p, pgprot);

	return ret;
}

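/**
 * ion_buffer_alloc - allocate a buffer from the first matching heap
 * @dev:          the ion device whose heaps should be searched
 * @len:          requested size in bytes; rounded up to a page multiple
 * @heap_id_mask: bitmask of heap ids the caller is willing to allocate from
 * @flags:        ION_FLAG_* allocation flags passed through to the heap
 *
 * Walks the device's heaps in priority order, skipping heaps whose id is
 * not set in @heap_id_mask, and returns the first successful allocation.
 *
 * Return: the new buffer on success, ERR_PTR(-EINVAL) for bad arguments,
 * ERR_PTR(-ENODEV) if no heap in @heap_id_mask exists, or the error from
 * the last heap tried.
 */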
struct ion_buffer *ion_buffer_alloc(struct ion_device *dev, size_t len,
				    unsigned int heap_id_mask,
				    unsigned int flags)
{
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;

	if (!dev || !len)
		return ERR_PTR(-EINVAL);

	/*
	 * Traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client and matches the
	 * request of the caller, allocate from it. Repeat until the
	 * allocation has succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);
	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (!buffer)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	return buffer;
}

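/**
 * ion_buffer_zero - clear the memory backing a buffer
 * @buffer: the buffer to zero
 *
 * Zeroes the buffer through a temporary kernel mapping, cached for
 * ION_FLAG_CACHED buffers and write-combined otherwise.
 *
 * Return: 0 on success or a negative errno if the pages could not be mapped.
 */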
int ion_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table;
	pgprot_t pgprot;

	if (!buffer)
		return -EINVAL;

	table = buffer->sg_table;
	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	return ion_sglist_zero(table->sgl, table->nents, pgprot);
}
EXPORT_SYMBOL_GPL(ion_buffer_zero);

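/**
 * ion_buffer_prep_noncached - prepare a buffer for non-cached mapping
 * @buffer: the buffer to prepare
 *
 * Flushes the CPU caches for every scatterlist segment so the buffer can
 * safely be mapped uncached. Buffers allocated with ION_FLAG_CACHED are
 * left untouched.
 */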
void ion_buffer_prep_noncached(struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	struct sg_table *table;
	int i;

	if (WARN_ONCE(!buffer || !buffer->sg_table,
		      "%s needs a buffer and a sg_table", __func__) ||
	    buffer->flags & ION_FLAG_CACHED)
		return;

	table = buffer->sg_table;

	for_each_sg(table->sgl, sg, table->orig_nents, i)
		arch_dma_prep_coherent(sg_page(sg), sg->length);
}
EXPORT_SYMBOL_GPL(ion_buffer_prep_noncached);

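/**
 * ion_buffer_release - free a buffer and its bookkeeping
 * @buffer: the buffer to free
 *
 * Drops any leftover kernel mapping, returns the memory to the heap,
 * updates the heap statistics, and releases the heap's module reference
 * once its last outstanding buffer is gone.
 */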
void ion_buffer_release(struct ion_buffer *buffer)
{
	if (buffer->kmap_cnt > 0) {
		pr_warn_once("%s: buffer still mapped in the kernel\n",
			     __func__);
		ion_heap_unmap_kernel(buffer->heap, buffer);
	}
	buffer->heap->ops->free(buffer);
	spin_lock(&buffer->heap->stat_lock);
	buffer->heap->num_of_buffers--;
	buffer->heap->num_of_alloc_bytes -= buffer->size;
	/* drop reference to the heap module */
	if (buffer->heap->num_of_buffers == 0)
		module_put(buffer->heap->owner);
	spin_unlock(&buffer->heap->stat_lock);

	kfree(buffer);
}

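/**
 * ion_buffer_destroy - undo ion_buffer_alloc()
 * @dev:    the ion device the buffer was allocated from
 * @buffer: the buffer to destroy
 *
 * Updates the global accounting and either queues the buffer on the heap's
 * deferred-free list or releases it immediately, depending on whether the
 * heap was registered with ION_HEAP_FLAG_DEFER_FREE.
 *
 * Return: 0 on success, -EINVAL if @dev or @buffer is NULL.
 */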
int ion_buffer_destroy(struct ion_device *dev, struct ion_buffer *buffer)
{
	struct ion_heap *heap;

	if (!dev || !buffer) {
		pr_warn("%s: invalid argument\n", __func__);
		return -EINVAL;
	}

	heap = buffer->heap;
	track_buffer_destroyed(buffer);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_release(buffer);

	return 0;
}

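/*
 * Return a kernel virtual address for the buffer, mapping it on first use
 * and bumping kmap_cnt for each additional user. kmap_cnt is not updated
 * atomically here, so callers are expected to provide their own
 * serialization (typically buffer->lock).
 */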
void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		if (buffer->kmap_cnt == INT_MAX)
			return ERR_PTR(-EOVERFLOW);

		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_heap_map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(!vaddr,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

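/*
 * Drop one kernel-mapping reference; the mapping itself is torn down when
 * the last reference goes away. Requires the same serialization as
 * ion_buffer_kmap_get().
 */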
void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		ion_heap_unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

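/* Total number of bytes currently allocated across all ION heaps. */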
u64 ion_get_total_heap_bytes(void)
{
	return atomic_long_read(&total_heap_bytes);
}