/*

 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	optional device-specific ioctl handler
 * @clients:		an rb tree of all the clients created against this device
 * @debug_root:		debugfs root directory for this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg_dma_len(sg) / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}

		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg. The implicit contract here is that
	   memory coming from the heaps is ready for dma, ie if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
err1:
	if (buffer->pages)
		vfree(buffer->pages);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	if (buffer->pages)
		vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
						int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);
	mutex_unlock(&client->lock);

	return handle ? handle : ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return (idr_find(&client->idr, handle->id) == handle);
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int rc;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	do {
		int id;
		rc = idr_pre_get(&client->idr, GFP_KERNEL);
		if (!rc)
			return -ENOMEM;
		rc = idr_get_new_above(&client->idr, handle, 1, &id);
		handle->id = id;
	} while (rc == -EAGAIN);

	if (rc < 0)
		return rc;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
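
/*
 * Example (not part of this file): a minimal sketch of how an in-kernel
 * user might drive the allocation API above, assuming it already holds a
 * struct ion_device *idev created elsewhere and that a heap with id 0
 * exists.  Error handling is abbreviated.
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(idev, "example");
 *	handle = ion_alloc(client, SZ_64K, PAGE_SIZE, 1 << 0, ION_FLAG_CACHED);
 *	vaddr = ion_map_kernel(client, handle);		// CPU access
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */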

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16u\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->name = name;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_remove_all(&client->idr);
	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			__dma_page_cpu_to_dev(page, 0, PAGE_SIZE, dir);
		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);

	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			     ion_buffer_page(buffer->pages[vmf->pgoff]));
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
						struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
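
/*
 * Example (not part of this file): a hypothetical in-kernel round trip
 * through the sharing API above.  "client" and "handle" are assumed to
 * come from ion_client_create()/ion_alloc(); error handling is omitted.
 *
 *	int fd;
 *	struct ion_handle *imported;
 *
 *	fd = ion_share_dma_buf_fd(client, handle);	// export as dma-buf fd
 *	// the fd can be passed to userspace or another driver; importing it
 *	// back through the same ion device yields a handle to the same
 *	// underlying buffer rather than a copy:
 *	imported = ion_import_dma_buf(client, fd);
 */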

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		handle = ion_alloc(client, data.len, data.align,
					     data.heap_id_mask, data.flags);

		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.handle = (struct ion_handle *)handle->id;

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		handle = ion_handle_get_by_id(client, (int)data.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		ion_free(client, handle);
		ion_handle_put(handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_fd_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		handle = ion_handle_get_by_id(client, (int)data.handle);
		data.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		struct ion_handle *handle;
		int ret = 0;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle = (struct ion_handle *)handle->id;

		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, "user");
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
};
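
/*
 * Example (not part of this file): a rough sketch of the userspace side of
 * the ioctl interface above.  Struct layouts come from the ion uapi header;
 * heap id 0 and the 64 KiB size are arbitrary, and error handling is
 * omitted.
 *
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *	struct ion_allocation_data alloc = {
 *		.len = 64 * 1024,
 *		.align = 4096,
 *		.heap_id_mask = 1 << 0,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);	// fills alloc.handle
 *
 *	struct ion_fd_data share = { .handle = alloc.handle };
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);	// fills share.fd (a dma-buf)
 *	void *ptr = mmap(NULL, 64 * 1024, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, share.fd, 0);	// serviced by ion_mmap()
 *
 *	struct ion_handle_data free_data = { .handle = alloc.handle };
 *	ioctl(ion_fd, ION_IOC_FREE, &free_data);
 */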

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
	seq_printf(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);
		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16.s %16u %16u\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16.s %16u %16u\n", client->name,
				   client->pid, size);
		}
	}
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "orphaned allocations (info is from last known client):"
		   "\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16.s %16u %16u %d %d\n", buffer->task_comm,
				   buffer->pid, buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "%16.s %16u\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16.s %16u\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16.s %16u\n", "deferred free",
				heap->free_list_size);
	seq_printf(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
        struct ion_heap *heap = data;
        struct shrink_control sc;
        int objs;

        sc.gfp_mask = -1;
        sc.nr_to_scan = 0;

        if (!val)
                return 0;

        objs = heap->shrinker.shrink(&heap->shrinker, &sc);
        sc.nr_to_scan = objs;

        heap->shrinker.shrink(&heap->shrinker, &sc);
        return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
        struct ion_heap *heap = data;
        struct shrink_control sc;
        int objs;

        sc.gfp_mask = -1;
        sc.nr_to_scan = 0;

        objs = heap->shrinker.shrink(&heap->shrinker, &sc);
        *val = objs;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
                        debug_shrink_set, "%llu\n");
#endif

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/* use negative heap->id to reverse the priority -- when traversing
	   the list later attempt higher id numbers first */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
				    &debug_shrink_fops);
	}
#endif
	up_write(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root)
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}
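
/*
 * Example (not part of this file): a minimal sketch of how a platform
 * driver might wire the device-level API above together, assuming a heap
 * object has already been constructed elsewhere (e.g. by the companion
 * heap code).  Error handling is omitted and the names are illustrative.
 *
 *	static struct ion_device *idev;
 *
 *	idev = ion_device_create(NULL);		// no custom ioctl handler
 *	ion_device_add_heap(idev, my_heap);	// my_heap: pre-built ion_heap
 *	// ... on teardown:
 *	ion_device_destroy(idev);
 */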

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;
			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for "
				       "heap %d\n",
					__func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
					       data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %x@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %d\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}
1520