// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "ion.h"

static struct ion_device *internal_dev;
static int heap_id;

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	buffer->dev = dev;
	buffer->size = len;

	ret = heap->ops->allocate(heap, buffer, len, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, flags);
		if (ret)
			goto err2;
	}

	if (!buffer->sg_table) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	spin_lock(&heap->stat_lock);
	heap->num_of_buffers++;
	heap->num_of_alloc_bytes += len;
	if (heap->num_of_alloc_bytes > heap->alloc_bytes_wm)
		heap->alloc_bytes_wm = heap->num_of_alloc_bytes;
	spin_unlock(&heap->stat_lock);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

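/*
 * Release the heap memory backing @buffer and free the buffer itself.  If
 * the buffer is still mapped in the kernel, warn once and drop the mapping
 * first, then update the heap's allocation statistics.
 */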
void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (buffer->kmap_cnt > 0) {
		pr_warn_once("%s: buffer still mapped in the kernel\n",
			     __func__);
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	}
	buffer->heap->ops->free(buffer);
	spin_lock(&buffer->heap->stat_lock);
	buffer->heap->num_of_buffers--;
	buffer->heap->num_of_alloc_bytes -= buffer->size;
	spin_unlock(&buffer->heap->stat_lock);

	kfree(buffer);
}

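/*
 * Internal teardown helper: queue the buffer on the heap's freelist when the
 * heap uses deferred freeing, otherwise destroy it immediately.
 */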
static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

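/*
 * Get a kernel virtual address for @buffer, mapping it on first use and
 * refcounting the mapping through kmap_cnt.  Callers are expected to hold
 * buffer->lock; ion_buffer_kmap_put() drops the mapping when the count
 * reaches zero.
 */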
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		if (buffer->kmap_cnt == INT_MAX)
			return ERR_PTR(-EOVERFLOW);

		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(!vaddr,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

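/*
 * Duplicate the buffer's sg_table for a new attachment.  The copy references
 * the same pages as the original but gets its own dma_address slots, so each
 * attached device can be mapped independently.
 */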
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sgtable_sg(table, sg, i) {
		memcpy(new_sg, sg, sizeof(*sg));
		new_sg->dma_address = 0;
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

static void free_duped_table(struct sg_table *table)
{
	sg_free_table(table);
	kfree(table);
}

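/* Per-device attachment state: a private sg_table copy plus list linkage. */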
struct ion_dma_buf_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
};

static int ion_dma_buf_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a;
	struct sg_table *table;
	struct ion_buffer *buffer = dmabuf->priv;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void ion_dma_buf_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);
	free_duped_table(a->table);

	kfree(a);
}

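/*
 * DMA-map the attachment's private sg_table for the attaching device and
 * hand it back to the dma-buf core; ion_unmap_dma_buf() undoes the mapping.
 */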
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;
	int ret;

	table = a->table;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(ret);

	return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

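/*
 * Map the buffer into a userspace VMA via the heap's map_user op.  Buffers
 * allocated without ION_FLAG_CACHED are mapped write-combined.
 */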
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	_ion_buffer_destroy(buffer);
}

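/*
 * Prepare the buffer for CPU access: take (or create) a kernel mapping when
 * the heap supports one, then sync every attached device's sg_table for the
 * CPU.  ion_dma_buf_end_cpu_access() drops the kernel mapping and syncs the
 * attachments back for the devices.
 */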
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;
	struct ion_dma_buf_attachment *a;
	int ret = 0;

	/*
	 * TODO: Move this elsewhere because we don't always need a vaddr
	 */
	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		vaddr = ion_buffer_kmap_get(buffer);
		if (IS_ERR(vaddr)) {
			ret = PTR_ERR(vaddr);
			goto unlock;
		}
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list)
		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);

unlock:
	mutex_unlock(&buffer->lock);
	return ret;
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_dma_buf_attachment *a;

	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		ion_buffer_kmap_put(buffer);
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list)
		dma_sync_sgtable_for_device(a->dev, a->table, direction);
	mutex_unlock(&buffer->lock);

	return 0;
}

static const struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.attach = ion_dma_buf_attach,
	.detach = ion_dma_buf_detach,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
};

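/*
 * Allocate a buffer from the first heap in @heap_id_mask that can satisfy
 * the request and export it as a dma-buf, returning the new fd (or a
 * negative errno).
 */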
static int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
	struct ion_device *dev = internal_dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	int fd;
	struct dma_buf *dmabuf;

	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
		 len, heap_id_mask, flags);
	/*
	 * Traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client and matches the
	 * request of the caller, allocate from it.  Repeat until the
	 * allocation has succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return -EINVAL;

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (!buffer)
		return -ENODEV;

	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		_ion_buffer_destroy(buffer);
		return PTR_ERR(dmabuf);
	}

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}

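/*
 * Report the registered heaps to userspace.  With a NULL destination buffer
 * only the heap count is returned; otherwise up to query->cnt entries are
 * copied out.
 */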
static int ion_query_heaps(struct ion_heap_query *query)
{
	struct ion_device *dev = internal_dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);
	return ret;
}

union ion_ioctl_arg {
	struct ion_allocation_data allocation;
	struct ion_heap_query query;
};

static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
{
	switch (cmd) {
	case ION_IOC_HEAP_QUERY:
		if (arg->query.reserved0 ||
		    arg->query.reserved1 ||
		    arg->query.reserved2)
			return -EINVAL;
		break;
	default:
		break;
	}

	return 0;
}

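/*
 * Rough sketch of the expected userspace flow (illustrative only; the
 * ION_IOC_* commands and the struct layouts come from the ION UAPI header,
 * which is not part of this file):
 *
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.heap_id_mask = mask,	(heap ids taken from ION_IOC_HEAP_QUERY)
 *		.flags = 0,
 *	};
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *
 * On success alloc.fd holds a dma-buf fd that can be mmap()ed or passed to
 * other drivers.
 */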
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	union ion_ioctl_arg data;

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	/*
	 * The copy_from_user is unconditional here for both read and write
	 * so that the argument can be validated.  If the ioctl does not
	 * write to the kernel, the buffer is cleared again below.
	 */
	if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
		return -EFAULT;

	ret = validate_ioctl_arg(cmd, &data);
	if (ret) {
		pr_warn_once("%s: ioctl validate failed\n", __func__);
		return ret;
	}

	if (!(_IOC_DIR(cmd) & _IOC_WRITE))
		memset(&data, 0, sizeof(data));

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		int fd;

		fd = ion_alloc(data.allocation.len,
			       data.allocation.heap_id_mask,
			       data.allocation.flags);
		if (fd < 0)
			return fd;

		data.allocation.fd = fd;

		break;
	}
	case ION_IOC_HEAP_QUERY:
		ret = ion_query_heaps(&data.query);
		break;
	default:
		return -ENOTTY;
	}

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
			return -EFAULT;
	}
	return ret;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

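/*
 * debugfs hook ("<heap>_shrink"): writing N scans up to N objects through
 * the heap shrinker, writing 0 drains everything the shrinker currently
 * counts, and reading returns the current object count.
 */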
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");

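/*
 * Register @heap with the ION core: set up deferred freeing and the shrinker
 * where requested, create per-heap debugfs statistics, and add the heap to
 * the device's priority list under a freshly assigned id.
 */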
void ion_device_add_heap(struct ion_heap *heap)
{
	struct ion_device *dev = internal_dev;
	int ret;
	struct dentry *heap_root;
	char debug_name[64];

	if (!heap->ops->allocate || !heap->ops->free)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	spin_lock_init(&heap->stat_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
		ret = ion_heap_init_shrinker(heap);
		if (ret)
			pr_err("%s: Failed to register shrinker\n", __func__);
	}

	heap->dev = dev;
	heap->num_of_buffers = 0;
	heap->num_of_alloc_bytes = 0;
	heap->alloc_bytes_wm = 0;

	heap_root = debugfs_create_dir(heap->name, dev->debug_root);
	debugfs_create_u64("num_of_buffers",
			   0444, heap_root,
			   &heap->num_of_buffers);
	debugfs_create_u64("num_of_alloc_bytes",
			   0444,
			   heap_root,
			   &heap->num_of_alloc_bytes);
	debugfs_create_u64("alloc_bytes_wm",
			   0444,
			   heap_root,
			   &heap->alloc_bytes_wm);

	if (heap->shrinker.count_objects &&
	    heap->shrinker.scan_objects) {
		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name,
				    0644,
				    heap_root,
				    heap,
				    &debug_shrink_fops);
	}

	down_write(&dev->lock);
	heap->id = heap_id++;
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);

	dev->heap_cnt++;
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

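/*
 * Create the global ION device at subsys_initcall time: register the
 * /dev/ion misc device, create the debugfs root, and initialize the heap
 * list that ion_device_add_heap() populates later.
 */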
static int ion_device_create(void)
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ret;
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	internal_dev = idev;
	return 0;
}
subsys_initcall(ion_device_create);