/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	hook for the device-specific ION_IOC_CUSTOM ioctl
 * @clients:		an rb tree of all the existing clients
 * @debug_root:		root of the device's debugfs directory
 * @heaps_debug_root:	debugfs directory for the heaps
 * @clients_debug_root:	debugfs directory for the clients
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 * @pid:		pid of the client's thread group leader, used for debugging
 * @debug_root:		the client's debugfs entry
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node and kmap_cnt should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

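/*
 * For buffers that fault in their user mappings, bit 0 of each entry in
 * buffer->pages doubles as a "dirty" flag: the helpers below tag, test and
 * clear that bit so ion_buffer_sync_for_device() knows which pages need a
 * cache sync before the next device access.
 */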
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
			"heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		ret = -EINVAL;
		goto err1;
	}

	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, i.e. if it has a
	 * cached mapping that mapping has been invalidated.
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}


void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

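/*
 * kref release callback for ion_buffer: drop the buffer from the device's
 * rbtree, then either queue it on the heap's freelist (deferred free) or
 * destroy it immediately.
 */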
static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by; it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system.
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

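/*
 * Create a handle referencing @buffer for @client.  Takes a reference on the
 * buffer and bumps its handle_count; the handle is not yet inserted into the
 * client's rbtree or idr (see ion_handle_add()).
 */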
static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

/* Must hold the client lock */
static struct ion_handle *ion_handle_get_check_overflow(
					struct ion_handle *handle)
{
	if (atomic_read(&handle->ref.refcount) + 1 == 0)
		return ERR_PTR(-EOVERFLOW);
	ion_handle_get(handle);
	return handle;
}

static int ion_handle_put_nolock(struct ion_handle *handle)
{
	int ret;

	ret = kref_put(&handle->ref, ion_handle_destroy);

	return ret;
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
						int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		return ion_handle_get_check_overflow(handle);

	return ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

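/*
 * Allocate an id for @handle in the client's idr and insert the handle into
 * the client's rbtree, which is keyed by buffer address.  Must be called
 * with client->lock held.
 */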
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client and matches the
	 * request of the caller, allocate from it.  Repeat until allocation
	 * has succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
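/*
 * Illustrative in-kernel usage of the allocation API (a sketch, not code from
 * this file; heap_id, size and the flag value are only example inputs):
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, size, PAGE_SIZE, 1 << heap_id,
 *			   ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */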

static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
			__func__, buffer->heap->name, buffer->heap->type);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		if (buffer->kmap_cnt == INT_MAX)
			return ERR_PTR(-EOVERFLOW);

		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
			"heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		if (handle->kmap_cnt == INT_MAX)
			return ERR_PTR(-EOVERFLOW);

		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

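/*
 * Return the next free serial number for clients named @name, used to build
 * a unique display_name ("name-serial") for debugfs.
 */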
static int ion_get_client_serial(const struct rb_root *root,
					const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

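/*
 * Create a client for @dev.  The caller's thread group leader (unless it is
 * a kernel thread) and its pid are recorded for debugging, and a debugfs
 * entry named after display_name is created in the device's clients
 * directory.
 */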
struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						dev->clients_debug_root,
						client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
			path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
		size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

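/*
 * Sync any pages that userspace faults have dirtied since the last device
 * access, then zap the buffer's user mappings so later CPU accesses fault
 * again and get re-tracked.  Only buffers that fault their user mappings
 * need this.
 */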
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
							PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

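/*
 * Fault handler for fault-mapped buffers: mark the faulting page dirty (so
 * it gets synced before the next device access) and insert its pfn into the
 * vma.
 */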
static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

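/*
 * mmap callback for the exported dma-buf.  Buffers that fault their user
 * mappings get a VM_PFNMAP vma populated on demand by ion_vm_fault();
 * everything else is mapped up front by the heap, with writecombine
 * protection when the buffer is uncached.
 */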
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
			__func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
							VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

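/*
 * Export @handle's buffer as a dma-buf.  A reference is taken on the buffer
 * for the lifetime of the dma-buf and dropped in ion_dma_buf_release() (or
 * immediately if the export fails).  @lock_client indicates whether
 * client->lock still needs to be taken here.
 */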
static struct dma_buf *__ion_share_dma_buf(struct ion_client *client,
					   struct ion_handle *handle,
					   bool lock_client)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	if (lock_client)
		mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		if (lock_client)
			mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	if (lock_client)
		mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	return __ion_share_dma_buf(client, handle, true);
}
EXPORT_SYMBOL(ion_share_dma_buf);

static int __ion_share_dma_buf_fd(struct ion_client *client,
				  struct ion_handle *handle, bool lock_client)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = __ion_share_dma_buf(client, handle, lock_client);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	return __ion_share_dma_buf_fd(client, handle, true);
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

static int ion_share_dma_buf_fd_nolock(struct ion_client *client,
				       struct ion_handle *handle)
{
	return __ion_share_dma_buf_fd(client, handle, false);
}

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		handle = ion_handle_get_check_overflow(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

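/*
 * Back end for ION_IOC_SYNC: sync the whole buffer for device access.  Only
 * dma-bufs exported by ion itself are accepted.
 */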
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}

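/*
 * Main ioctl dispatcher: copy the argument in (when the direction bits say
 * so), handle the command, and copy the result back out.  If copying the
 * result of ION_IOC_ALLOC back to userspace fails, the freshly allocated
 * handle is freed again.
 */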
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = ion_alloc(client, data.allocation.len,
						data.allocation.align,
						data.allocation.heap_id_mask,
						data.allocation.flags);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		mutex_lock(&client->lock);
		handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
		if (IS_ERR(handle)) {
			mutex_unlock(&client->lock);
			return PTR_ERR(handle);
		}
		ion_free_nolock(client, handle);
		ion_handle_put_nolock(handle);
		mutex_unlock(&client->lock);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		mutex_lock(&client->lock);
		handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
		if (IS_ERR(handle)) {
			mutex_unlock(&client->lock);
			return PTR_ERR(handle);
		}
		data.fd.fd = ion_share_dma_buf_fd_nolock(client, handle);
		ion_handle_put_nolock(handle);
		mutex_unlock(&client->lock);
		if (data.fd.fd < 0)
			ret = data.fd.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = ion_import_dma_buf(client, data.fd.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
						data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl   = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
				heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

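/*
 * debugfs "<heap>_shrink" attribute: writing N asks the heap's shrinker to
 * scan N objects (0 means "everything currently freeable"); reading reports
 * how many objects the shrinker could free.
 */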
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					dev->heaps_debug_root, heap,
					&debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
			path, heap->name);
	}

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
				path, debug_name);
		}
	}

	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}
EXPORT_SYMBOL(ion_device_create);

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
					__func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
					       data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}