// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "ion_private.h"

#define ION_CURRENT_ABI_VERSION  2

static struct ion_device *internal_dev;

/* Entry into the ION allocator for the rest of the kernel */
struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
			  unsigned int flags)
{
	return ion_dmabuf_alloc(internal_dev, len, heap_id_mask, flags);
}
EXPORT_SYMBOL_GPL(ion_alloc);
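
/*
 * Example (illustrative sketch, not part of this file): a kernel client
 * might allocate from the system heap and later drop its reference, which
 * frees the buffer. ION_HEAP_SYSTEM is the heap mask also used below in
 * ion_assign_heap_id(); SZ_1M comes from <linux/sizes.h>.
 *
 *	struct dma_buf *dbuf;
 *
 *	dbuf = ion_alloc(SZ_1M, ION_HEAP_SYSTEM, ION_FLAG_CACHED);
 *	if (IS_ERR(dbuf))
 *		return PTR_ERR(dbuf);
 *	...
 *	dma_buf_put(dbuf);
 */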

int ion_free(struct ion_buffer *buffer)
{
	return ion_buffer_destroy(internal_dev, buffer);
}
EXPORT_SYMBOL_GPL(ion_free);

static int ion_alloc_fd(size_t len, unsigned int heap_id_mask,
			unsigned int flags)
{
	int fd;
	struct dma_buf *dmabuf;

	dmabuf = ion_dmabuf_alloc(internal_dev, len, heap_id_mask, flags);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}

static int ion_query_heaps(struct ion_heap_query *query)
{
	struct ion_device *dev = internal_dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);
	return ret;
}
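
/*
 * Example (userspace sketch, error handling omitted): the query protocol is
 * two-pass. A first ION_IOC_HEAP_QUERY with query.heaps left NULL only fills
 * in the heap count; the caller then sizes a buffer and queries again.
 *
 *	struct ion_heap_query query = { 0 };
 *	struct ion_heap_data *heaps;
 *
 *	ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query);
 *	heaps = calloc(query.cnt, sizeof(*heaps));
 *	query.heaps = (__u64)(uintptr_t)heaps;
 *	ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query);
 */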

union ion_ioctl_arg {
	struct ion_allocation_data allocation;
	struct ion_heap_query query;
	u32 ion_abi_version;
};

static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
{
	switch (cmd) {
	case ION_IOC_HEAP_QUERY:
		if (arg->query.reserved0 ||
		    arg->query.reserved1 ||
		    arg->query.reserved2)
			return -EINVAL;
		break;
	default:
		break;
	}

	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	union ion_ioctl_arg data;

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	/*
	 * The copy_from_user() is unconditional here, for both read and
	 * write ioctls, so that the argument can be validated. If the
	 * ioctl carries no write data, the buffer is cleared below.
	 */
	if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
		return -EFAULT;

	ret = validate_ioctl_arg(cmd, &data);
	if (ret) {
		pr_warn_once("%s: ioctl validate failed\n", __func__);
		return ret;
	}

	if (!(_IOC_DIR(cmd) & _IOC_WRITE))
		memset(&data, 0, sizeof(data));

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		int fd;

		fd = ion_alloc_fd(data.allocation.len,
				  data.allocation.heap_id_mask,
				  data.allocation.flags);
		if (fd < 0)
			return fd;

		data.allocation.fd = fd;

		break;
	}
	case ION_IOC_HEAP_QUERY:
		ret = ion_query_heaps(&data.query);
		break;
	case ION_IOC_ABI_VERSION:
		data.ion_abi_version = ION_CURRENT_ABI_VERSION;
		break;
	default:
		return -ENOTTY;
	}

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
			return -EFAULT;
	}
	return ret;
}
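
/*
 * Example (userspace sketch, error handling omitted): allocating a dma-buf
 * fd through the ioctl interface. The heap id would normally come from a
 * prior ION_IOC_HEAP_QUERY; heap id 0 is only a placeholder here.
 *
 *	int ion_fd = open("/dev/ion", O_RDONLY | O_CLOEXEC);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.heap_id_mask = 1 << 0,
 *		.flags = 0,
 *	};
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *
 * On success, alloc.fd holds a dma-buf fd that can be mmap()ed or shared.
 */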

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = ion_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ion_ioctl,
#endif
};

static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
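
/*
 * Example (shell sketch; the path follows from the debugfs layout created in
 * __ion_device_add_heap() below): reading a heap's shrink file reports the
 * number of freeable objects, writing 0 shrinks everything that can be
 * shrunk, and writing a nonzero value shrinks at most that many objects.
 *
 *	cat /sys/kernel/debug/ion/<heap>/<heap>_shrink
 *	echo 0 > /sys/kernel/debug/ion/<heap>/<heap>_shrink
 */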

static int ion_assign_heap_id(struct ion_heap *heap, struct ion_device *dev)
{
	int id_bit = -EINVAL;
	int start_bit = -1, end_bit = -1;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM:
		id_bit = __ffs(ION_HEAP_SYSTEM);
		break;
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		id_bit = __ffs(ION_HEAP_SYSTEM_CONTIG);
		break;
	case ION_HEAP_TYPE_CHUNK:
		id_bit = __ffs(ION_HEAP_CHUNK);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		start_bit = __ffs(ION_HEAP_CARVEOUT_START);
		end_bit = __ffs(ION_HEAP_CARVEOUT_END);
		break;
	case ION_HEAP_TYPE_DMA:
		start_bit = __ffs(ION_HEAP_DMA_START);
		end_bit = __ffs(ION_HEAP_DMA_END);
		break;
	case ION_HEAP_TYPE_CUSTOM ... ION_HEAP_TYPE_MAX:
		start_bit = __ffs(ION_HEAP_CUSTOM_START);
		end_bit = __ffs(ION_HEAP_CUSTOM_END);
		break;
	default:
		return -EINVAL;
	}

	/*
	 * For carveout, DMA and custom heaps, we first let the heaps choose
	 * their own IDs. This preserves the old behaviour, where user space
	 * knows the heap IDs of these types of heaps in advance. If a heap
	 * with that ID already exists, it is an error.
	 *
	 * If the heap hasn't picked an ID by itself, then we assign it one.
	 */
	if (id_bit < 0) {
		if (heap->id) {
			id_bit = __ffs(heap->id);
			if (id_bit < start_bit || id_bit > end_bit)
				return -EINVAL;
		} else {
			id_bit = find_next_zero_bit(dev->heap_ids, end_bit + 1,
						    start_bit);
			if (id_bit > end_bit)
				return -ENOSPC;
		}
	}

	if (test_and_set_bit(id_bit, dev->heap_ids))
		return -EEXIST;
	heap->id = id_bit;
	dev->heap_cnt++;

	return 0;
}
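
/*
 * Example (illustrative; exact bit positions depend on the uapi heap masks):
 * a carveout heap registered with heap->id pre-set to a single mask bit,
 * say ION_HEAP_CARVEOUT_START, ends up with __ffs() of that mask as its ID,
 * while one registered with heap->id == 0 is handed the first free bit in
 * the [__ffs(ION_HEAP_CARVEOUT_START), __ffs(ION_HEAP_CARVEOUT_END)] range.
 */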

int __ion_device_add_heap(struct ion_heap *heap, struct module *owner)
{
	struct ion_device *dev = internal_dev;
	int ret;
	struct dentry *heap_root;
	char debug_name[64];

	if (!heap || !heap->ops || !heap->ops->allocate || !heap->ops->free) {
		pr_err("%s: invalid heap or heap_ops\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	heap->owner = owner;
	spin_lock_init(&heap->free_lock);
	spin_lock_init(&heap->stat_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
		ret = ion_heap_init_deferred_free(heap);
		if (ret)
			goto out_heap_cleanup;
	}

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
		ret = ion_heap_init_shrinker(heap);
		if (ret) {
			pr_err("%s: Failed to register shrinker\n", __func__);
			goto out_heap_cleanup;
		}
	}

	heap->num_of_buffers = 0;
	heap->num_of_alloc_bytes = 0;
	heap->alloc_bytes_wm = 0;

	heap_root = debugfs_create_dir(heap->name, dev->debug_root);
	debugfs_create_u64("num_of_buffers",
			   0444, heap_root,
			   &heap->num_of_buffers);
	debugfs_create_u64("num_of_alloc_bytes",
			   0444,
			   heap_root,
			   &heap->num_of_alloc_bytes);
	debugfs_create_u64("alloc_bytes_wm",
			   0444,
			   heap_root,
			   &heap->alloc_bytes_wm);

	if (heap->shrinker.count_objects &&
	    heap->shrinker.scan_objects) {
		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name,
				    0644,
				    heap_root,
				    heap,
				    &debug_shrink_fops);
	}

	heap->debugfs_dir = heap_root;
	down_write(&dev->lock);
	ret = ion_assign_heap_id(heap, dev);
	if (ret) {
		pr_err("%s: Failed to assign heap id for heap type %x\n",
		       __func__, heap->type);
		up_write(&dev->lock);
		goto out_debugfs_cleanup;
	}

	/*
	 * Use negative heap->id as the plist priority so that, when the heap
	 * list is traversed later, higher heap IDs are attempted first. For
	 * example, heaps with IDs 4 and 5 get priorities -4 and -5; plists
	 * iterate in ascending priority order, so ID 5 is tried before ID 4.
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);

	up_write(&dev->lock);

	return 0;

out_debugfs_cleanup:
	debugfs_remove_recursive(heap->debugfs_dir);
out_heap_cleanup:
	ion_heap_cleanup(heap);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(__ion_device_add_heap);
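
/*
 * Example (module sketch, with a hypothetical my_heap_ops providing at
 * least allocate() and free(), and assuming the usual ion_device_add_heap()
 * wrapper that passes THIS_MODULE as the owner): a heap driver registers
 * itself at module init and unregisters on exit.
 *
 *	static struct ion_heap my_heap = {
 *		.name = "my_carveout",
 *		.type = ION_HEAP_TYPE_CARVEOUT,
 *		.ops  = &my_heap_ops,
 *	};
 *
 *	static int __init my_heap_init(void)
 *	{
 *		return ion_device_add_heap(&my_heap);
 *	}
 *
 *	static void __exit my_heap_exit(void)
 *	{
 *		ion_device_remove_heap(&my_heap);
 *	}
 */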

void ion_device_remove_heap(struct ion_heap *heap)
{
	struct ion_device *dev = internal_dev;

	if (!heap) {
		pr_err("%s: Invalid argument\n", __func__);
		return;
	}

	/* Take the semaphore and remove the heap from the dev->heaps list */
	down_write(&dev->lock);
	/* So no new allocations can happen from this heap */
	plist_del(&heap->node, &dev->heaps);
	if (ion_heap_cleanup(heap) != 0) {
		pr_warn("%s: failed to cleanup heap (%s)\n",
			__func__, heap->name);
	}
	debugfs_remove_recursive(heap->debugfs_dir);
	clear_bit(heap->id, dev->heap_ids);
	dev->heap_cnt--;
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_remove_heap);

static int ion_device_create(void)
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ret;
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	internal_dev = idev;
	return 0;
}
subsys_initcall(ion_device_create);