// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "ion_private.h"

#define ION_CURRENT_ABI_VERSION 2

static struct ion_device *internal_dev;

/* Entry into ION allocator for rest of the kernel */
struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
			  unsigned int flags)
{
	return ion_dmabuf_alloc(internal_dev, len, heap_id_mask, flags);
}
EXPORT_SYMBOL_GPL(ion_alloc);
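/*
 * Illustrative sketch (not part of the original driver): a kernel caller of
 * ion_alloc() requests a buffer by heap mask and drops its reference with
 * dma_buf_put() when done. SZ_1M and the zero flags value are assumptions
 * for the example; ION_HEAP_SYSTEM is the system heap mask bit used below.
 *
 *	struct dma_buf *dmabuf;
 *
 *	dmabuf = ion_alloc(SZ_1M, ION_HEAP_SYSTEM, 0);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *	...
 *	dma_buf_put(dmabuf);
 */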

int ion_free(struct ion_buffer *buffer)
{
	return ion_buffer_destroy(internal_dev, buffer);
}
EXPORT_SYMBOL_GPL(ion_free);

static int ion_alloc_fd(size_t len, unsigned int heap_id_mask,
			unsigned int flags)
{
	int fd;
	struct dma_buf *dmabuf;

	dmabuf = ion_dmabuf_alloc(internal_dev, len, heap_id_mask, flags);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}

size_t ion_query_heaps_kernel(struct ion_heap_data *hdata, size_t size)
{
	struct ion_device *dev = internal_dev;
	size_t i = 0, num_heaps = 0;
	struct ion_heap *heap;

	down_read(&dev->lock);

	// If size is 0, return without updating hdata.
	if (size == 0) {
		num_heaps = dev->heap_cnt;
		goto out;
	}

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata[i].name, heap->name, MAX_HEAP_NAME);
		hdata[i].name[MAX_HEAP_NAME - 1] = '\0';
		hdata[i].type = heap->type;
		hdata[i].heap_id = heap->id;

		i++;
		if (i >= size)
			break;
	}

	num_heaps = i;
out:
	up_read(&dev->lock);
	return num_heaps;
}
EXPORT_SYMBOL_GPL(ion_query_heaps_kernel);
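/*
 * Illustrative sketch (not part of the original driver): in-kernel callers
 * of ion_query_heaps_kernel() are expected to probe the heap count with a
 * zero size first and then fetch the data; the kcalloc()'d storage and the
 * caller below are assumptions for the example.
 *
 *	size_t cnt = ion_query_heaps_kernel(NULL, 0);
 *	struct ion_heap_data *hdata = kcalloc(cnt, sizeof(*hdata), GFP_KERNEL);
 *
 *	if (hdata)
 *		cnt = ion_query_heaps_kernel(hdata, cnt);
 */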

static int ion_query_heaps(struct ion_heap_query *query)
{
	struct ion_device *dev = internal_dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);
	return ret;
}

union ion_ioctl_arg {
	struct ion_allocation_data allocation;
	struct ion_heap_query query;
	u32 ion_abi_version;
};

static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
{
	switch (cmd) {
	case ION_IOC_HEAP_QUERY:
		if (arg->query.reserved0 ||
		    arg->query.reserved1 ||
		    arg->query.reserved2)
			return -EINVAL;
		break;
	default:
		break;
	}

	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	union ion_ioctl_arg data;

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	/*
	 * The copy_from_user() is unconditional here, for both read and
	 * write directions, so that the argument can be validated. If the
	 * ioctl does not pass data in, the buffer is cleared below before
	 * the command is handled.
	 */
	if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
		return -EFAULT;

	ret = validate_ioctl_arg(cmd, &data);
	if (ret) {
		pr_warn_once("%s: ioctl validate failed\n", __func__);
		return ret;
	}

	if (!(_IOC_DIR(cmd) & _IOC_WRITE))
		memset(&data, 0, sizeof(data));

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		int fd;

		fd = ion_alloc_fd(data.allocation.len,
				  data.allocation.heap_id_mask,
				  data.allocation.flags);
		if (fd < 0)
			return fd;

		data.allocation.fd = fd;

		break;
	}
	case ION_IOC_HEAP_QUERY:
		ret = ion_query_heaps(&data.query);
		break;
	case ION_IOC_ABI_VERSION:
		data.ion_abi_version = ION_CURRENT_ABI_VERSION;
		break;
	default:
		return -ENOTTY;
	}

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
			return -EFAULT;
	}
	return ret;
}
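/*
 * Illustrative sketch (not part of the original driver): from user space this
 * handler is reached through the misc device registered below as /dev/ion.
 * Error handling is elided and the struct fields are those used in
 * ion_ioctl() above.
 *
 *	int devfd = open("/dev/ion", O_RDWR);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.heap_id_mask = ION_HEAP_SYSTEM,
 *		.flags = 0,
 *	};
 *
 *	ioctl(devfd, ION_IOC_ALLOC, &alloc);
 *	// alloc.fd now holds a dma-buf file descriptor
 */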

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = ion_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ion_ioctl,
#endif
};

static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
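/*
 * Note (editorial): through debug_shrink_fops, each heap with a shrinker gets
 * a "<heap>_shrink" debugfs file (created in __ion_device_add_heap() below).
 * Reading it reports the number of freeable objects; writing N scans up to N
 * objects, and writing 0 drains everything the shrinker currently counts.
 * Assuming the default debugfs mount point and a heap named "system":
 *
 *	cat /sys/kernel/debug/ion/system/system_shrink
 *	echo 0 > /sys/kernel/debug/ion/system/system_shrink
 */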

static int ion_assign_heap_id(struct ion_heap *heap, struct ion_device *dev)
{
	int id_bit = -EINVAL;
	int start_bit = -1, end_bit = -1;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM:
		id_bit = __ffs(ION_HEAP_SYSTEM);
		break;
	case ION_HEAP_TYPE_DMA:
		start_bit = __ffs(ION_HEAP_DMA_START);
		end_bit = __ffs(ION_HEAP_DMA_END);
		break;
	case ION_HEAP_TYPE_CUSTOM ... ION_HEAP_TYPE_MAX:
		start_bit = __ffs(ION_HEAP_CUSTOM_START);
		end_bit = __ffs(ION_HEAP_CUSTOM_END);
		break;
	default:
		return -EINVAL;
	}

	/*
	 * For carveout, DMA and custom heaps, we first let the heap choose
	 * its own ID. This preserves the old behaviour where user space
	 * knows the heap IDs of these heap types in advance. If a heap with
	 * that ID already exists, it is an error.
	 *
	 * If the heap has not picked an ID itself, we assign it one.
	 */
	if (id_bit < 0) {
		if (heap->id) {
			id_bit = __ffs(heap->id);
			if (id_bit < start_bit || id_bit > end_bit)
				return -EINVAL;
		} else {
			id_bit = find_next_zero_bit(dev->heap_ids, end_bit + 1,
						    start_bit);
			if (id_bit > end_bit)
				return -ENOSPC;
		}
	}

	if (test_and_set_bit(id_bit, dev->heap_ids))
		return -EEXIST;
	heap->id = id_bit;
	dev->heap_cnt++;

	return 0;
}
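/*
 * Editorial example of the policy above: a custom heap registered with
 * heap->id already set keeps the bit derived from that ID, provided it lies
 * inside [ION_HEAP_CUSTOM_START, ION_HEAP_CUSTOM_END] and is still free; a
 * custom heap registered with heap->id == 0 is handed the first free bit in
 * that range instead.
 */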

int __ion_device_add_heap(struct ion_heap *heap, struct module *owner)
{
	struct ion_device *dev = internal_dev;
	int ret;
	struct dentry *heap_root;
	char debug_name[64];

	if (!heap || !heap->ops || !heap->ops->allocate || !heap->ops->free) {
		pr_err("%s: invalid heap or heap_ops\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	heap->owner = owner;
	spin_lock_init(&heap->free_lock);
	spin_lock_init(&heap->stat_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
		ret = ion_heap_init_deferred_free(heap);
		if (ret)
			goto out_heap_cleanup;
	}

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
		ret = ion_heap_init_shrinker(heap);
		if (ret) {
			pr_err("%s: Failed to register shrinker\n", __func__);
			goto out_heap_cleanup;
		}
	}

	heap->num_of_buffers = 0;
	heap->num_of_alloc_bytes = 0;
	heap->alloc_bytes_wm = 0;

	heap_root = debugfs_create_dir(heap->name, dev->debug_root);
	debugfs_create_u64("num_of_buffers",
			   0444, heap_root,
			   &heap->num_of_buffers);
	debugfs_create_u64("num_of_alloc_bytes",
			   0444,
			   heap_root,
			   &heap->num_of_alloc_bytes);
	debugfs_create_u64("alloc_bytes_wm",
			   0444,
			   heap_root,
			   &heap->alloc_bytes_wm);

	if (heap->shrinker.count_objects &&
	    heap->shrinker.scan_objects) {
		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name,
				    0644,
				    heap_root,
				    heap,
				    &debug_shrink_fops);
	}

	heap->debugfs_dir = heap_root;
	down_write(&dev->lock);
	ret = ion_assign_heap_id(heap, dev);
	if (ret) {
		pr_err("%s: Failed to assign heap id for heap type %x\n",
		       __func__, heap->type);
		up_write(&dev->lock);
		goto out_debugfs_cleanup;
	}

	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);

	up_write(&dev->lock);

	return 0;

out_debugfs_cleanup:
	debugfs_remove_recursive(heap->debugfs_dir);
out_heap_cleanup:
	ion_heap_cleanup(heap);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(__ion_device_add_heap);
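/*
 * Illustrative sketch (not part of the original driver): a heap module would
 * fill in an ion_heap and register it here, usually through a wrapper in the
 * public header that passes THIS_MODULE. The my_heap/my_heap_ops names and
 * the ion_device_add_heap() wrapper below are assumptions for the example;
 * the ops must at least provide .allocate and .free, as checked above.
 *
 *	static struct ion_heap my_heap = {
 *		.name = "my_heap",
 *		.type = ION_HEAP_TYPE_CUSTOM,
 *		.ops  = &my_heap_ops,
 *	};
 *
 *	ret = ion_device_add_heap(&my_heap);
 */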

void ion_device_remove_heap(struct ion_heap *heap)
{
	struct ion_device *dev = internal_dev;

	if (!heap) {
		pr_err("%s: Invalid argument\n", __func__);
		return;
	}

	// take the semaphore and remove the heap from the dev->heaps list
	down_write(&dev->lock);
	/* So no new allocations can happen from this heap */
	plist_del(&heap->node, &dev->heaps);
	if (ion_heap_cleanup(heap) != 0) {
		pr_warn("%s: failed to cleanup heap (%s)\n",
			__func__, heap->name);
	}
	debugfs_remove_recursive(heap->debugfs_dir);
	clear_bit(heap->id, dev->heap_ids);
	dev->heap_cnt--;
	up_write(&dev->lock);
}
EXPORT_SYMBOL_GPL(ion_device_remove_heap);

static ssize_t
total_heaps_kb_show(struct kobject *kobj, struct kobj_attribute *attr,
		    char *buf)
{
	return sprintf(buf, "%llu\n",
		       div_u64(ion_get_total_heap_bytes(), 1024));
}

static ssize_t
total_pools_kb_show(struct kobject *kobj, struct kobj_attribute *attr,
		    char *buf)
{
	struct ion_device *dev = internal_dev;
	struct ion_heap *heap;
	u64 total_pages = 0;

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node)
		if (heap->ops->get_pool_size)
			total_pages += heap->ops->get_pool_size(heap);
	up_read(&dev->lock);

	return sprintf(buf, "%llu\n", total_pages * (PAGE_SIZE / 1024));
}

static struct kobj_attribute total_heaps_kb_attr =
	__ATTR_RO(total_heaps_kb);

static struct kobj_attribute total_pools_kb_attr =
	__ATTR_RO(total_pools_kb);

static struct attribute *ion_device_attrs[] = {
	&total_heaps_kb_attr.attr,
	&total_pools_kb_attr.attr,
	NULL,
};

ATTRIBUTE_GROUPS(ion_device);
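/*
 * Note (editorial): ion_init_sysfs() below attaches these attributes to a
 * kobject created under kernel_kobj, so they appear as
 * /sys/kernel/ion/total_heaps_kb and /sys/kernel/ion/total_pools_kb, both
 * reporting sizes in KiB.
 */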

static int ion_init_sysfs(void)
{
	struct kobject *ion_kobj;
	int ret;

	ion_kobj = kobject_create_and_add("ion", kernel_kobj);
	if (!ion_kobj)
		return -ENOMEM;

	ret = sysfs_create_groups(ion_kobj, ion_device_groups);
	if (ret) {
		kobject_put(ion_kobj);
		return ret;
	}

	return 0;
}

static int ion_device_create(void)
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		goto err_reg;
	}

	ret = ion_init_sysfs();
	if (ret) {
		pr_err("ion: failed to add sysfs attributes.\n");
		goto err_sysfs;
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	internal_dev = idev;
	return 0;

err_sysfs:
	misc_deregister(&idev->dev);
err_reg:
	kfree(idev);
	return ret;
}
subsys_initcall(ion_device_create);