/*
 * drivers/staging/android/ion/ion_priv.h
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _ION_PRIV_H
#define _ION_PRIV_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#ifdef CONFIG_ION_POOL_CACHE_POLICY
#include <asm/cacheflush.h>
#endif

#include "ion.h"

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);

/**
 * struct ion_buffer - metadata for a particular buffer
 * @ref:		reference count
 * @node:		node in the ion_device buffers tree
 * @dev:		back pointer to the ion_device
 * @heap:		back pointer to the heap the buffer came from
 * @flags:		buffer specific flags
 * @private_flags:	internal buffer specific flags
 * @size:		size of the buffer
 * @priv_virt:		private data to the buffer representable as
 *			a void *
 * @priv_phys:		private data to the buffer representable as
 *			an ion_phys_addr_t (and someday a phys_addr_t)
 * @lock:		protects the buffer's cnt fields
 * @kmap_cnt:		number of times the buffer is mapped to the kernel
 * @vaddr:		the kernel mapping if kmap_cnt is not zero
 * @dmap_cnt:		number of times the buffer is mapped for dma
 * @sg_table:		the sg table for the buffer if dmap_cnt is not zero
 * @pages:		flat array of pages in the buffer -- used by the fault
 *			handler and only valid for buffers that are faulted in
 * @vmas:		list of vmas mapping this buffer
 * @handle_count:	count of handles referencing this buffer
 * @task_comm:		comm of the last client to reference this buffer in a
 *			handle, used for debugging
 * @pid:		pid of the last client to reference this buffer in a
 *			handle, used for debugging
 */
struct ion_buffer {
	struct kref ref;
	union {
		struct rb_node node;
		struct list_head list;
	};
	struct ion_device *dev;
	struct ion_heap *heap;
	unsigned long flags;
	unsigned long private_flags;
	size_t size;
	union {
		void *priv_virt;
		ion_phys_addr_t priv_phys;
	};
	struct mutex lock;
	int kmap_cnt;
	void *vaddr;
	int dmap_cnt;
	struct sg_table *sg_table;
	struct page **pages;
	struct list_head vmas;
	/* used to track orphaned buffers */
	int handle_count;
	char task_comm[TASK_COMM_LEN];
	pid_t pid;
};

void ion_buffer_destroy(struct ion_buffer *buffer);

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:		allocate memory
 * @free:		free memory
 * @phys:		get physical address of a buffer (only defined for
 *			physically contiguous heaps)
 * @map_dma:		map the memory for dma to a scatterlist
 * @unmap_dma:		unmap the memory for dma
 * @map_kernel:		map memory to the kernel
 * @unmap_kernel:	unmap memory from the kernel
 * @map_user:		map memory to userspace
 * @shrink:		shrink the heap, e.g. by returning pooled pages to
 *			the system (optional)
 *
 * allocate, phys, and map_user return 0 on success, -errno on error.
 * map_dma and map_kernel return a pointer on success, an ERR_PTR on
 * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
 * the buffer's private_flags when called from a shrinker. In that
 * case, the pages being freed must be truly freed back to the
 * system, not put in a page pool or otherwise cached.
 */
struct ion_heap_ops {
	int (*allocate)(struct ion_heap *heap,
			struct ion_buffer *buffer, unsigned long len,
			unsigned long align, unsigned long flags);
	void (*free)(struct ion_buffer *buffer);
	int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
		    ion_phys_addr_t *addr, size_t *len);
	struct sg_table * (*map_dma)(struct ion_heap *heap,
				     struct ion_buffer *buffer);
	void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
	void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
	void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
			struct vm_area_struct *vma);
	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
};

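/*
 * Example (illustrative sketch, not part of this header): per the
 * conventions documented above, @allocate returns 0 or -errno and
 * stashes heap-private state in the buffer. my_heap_alloc_table() is
 * a hypothetical helper that builds an sg_table for the allocation.
 *
 *	static int my_heap_allocate(struct ion_heap *heap,
 *				    struct ion_buffer *buffer,
 *				    unsigned long len, unsigned long align,
 *				    unsigned long flags)
 *	{
 *		struct sg_table *table = my_heap_alloc_table(len, align);
 *
 *		if (IS_ERR(table))
 *			return PTR_ERR(table);
 *		buffer->priv_virt = table;
 *		return 0;
 *	}
 */
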
/**
 * heap flags - flags between the heaps and core ion code
 */
#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)

/**
 * private flags - flags internal to ion
 */
/*
 * Buffer is being freed from a shrinker function. Skip any possible
 * heap-specific caching mechanism (e.g. page pools). Guarantees that
 * any buffer storage that came from the system allocator will be
 * returned to the system allocator.
 */
#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
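
/*
 * Example (illustrative sketch, not part of this header): a heap's
 * @free op must honor ION_PRIV_FLAG_SHRINKER_FREE by bypassing its
 * pools. my_heap_free_to_system() and my_heap_pool_put() are
 * hypothetical helpers.
 *
 *	static void my_heap_free(struct ion_buffer *buffer)
 *	{
 *		if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
 *			my_heap_free_to_system(buffer);
 *		else
 *			my_heap_pool_put(buffer);
 *	}
 */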

/**
 * struct ion_heap - represents a heap in the system
 * @node:		plist node to put the heap on the device's list of heaps
 * @dev:		back pointer to the ion_device
 * @type:		type of heap
 * @ops:		ops struct as above
 * @flags:		flags
 * @id:			id of heap, also indicates priority of this heap when
 *			allocating.  These are specified by platform data and
 *			MUST be unique
 * @name:		used for debugging
 * @shrinker:		a shrinker for the heap
 * @free_list:		free list head if deferred free is used
 * @free_list_size:	size of the deferred free list in bytes
 * @free_lock:		protects the free list
 * @waitqueue:		queue to wait on from deferred free thread
 * @task:		task struct of deferred free thread
 * @debug_show:		called when heap debug file is read to add any
 *			heap specific debug info to output
 *
 * Represents a pool of memory from which buffers can be made.  In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
	struct plist_node node;
	struct ion_device *dev;
	enum ion_heap_type type;
	struct ion_heap_ops *ops;
	unsigned long flags;
	unsigned int id;
	const char *name;
	struct shrinker shrinker;
	struct list_head free_list;
	size_t free_list_size;
	spinlock_t free_lock;
	wait_queue_head_t waitqueue;
	struct task_struct *task;

	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
};

/**
 * ion_buffer_cached - this ion buffer is cached
 * @buffer:		buffer
 *
 * indicates whether this ion buffer is cached
 */
bool ion_buffer_cached(struct ion_buffer *buffer);

/**
 * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
 * @buffer:		buffer
 *
 * indicates whether userspace mappings of this buffer will be faulted
 * in; this can affect how buffers are allocated from the heap.
 */
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);

/**
 * ion_device_create - allocates and returns an ion device
 * @custom_ioctl:	arch specific ioctl function if applicable
 *
 * returns a valid device or an ERR_PTR value on failure
 */
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg));

/**
 * ion_device_destroy - free a device and its resources
 * @dev:		the device
 */
void ion_device_destroy(struct ion_device *dev);

/**
 * ion_device_add_heap - adds a heap to the ion device
 * @dev:		the device
 * @heap:		the heap to add
 */
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);

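/*
 * Example (illustrative sketch, not part of this header): typical
 * platform setup creates the device once, then registers each heap.
 * my_custom_ioctl and my_heap are hypothetical.
 *
 *	struct ion_device *idev = ion_device_create(my_custom_ioctl);
 *
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *	ion_device_add_heap(idev, my_heap);
 */
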
/**
 * some helpers for common operations on buffers using the sg_table
 * and vaddr fields
 */
void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
			struct vm_area_struct *);
int ion_heap_buffer_zero(struct ion_buffer *buffer);
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);

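/*
 * Example (illustrative sketch, not part of this header): heaps whose
 * buffers carry a valid sg_table can point their ops directly at the
 * helpers above instead of rolling their own mapping code. The
 * my_heap_* names are hypothetical.
 *
 *	static struct ion_heap_ops my_heap_ops = {
 *		.allocate	= my_heap_allocate,
 *		.free		= my_heap_free,
 *		.map_dma	= my_heap_map_dma,
 *		.unmap_dma	= my_heap_unmap_dma,
 *		.map_kernel	= ion_heap_map_kernel,
 *		.unmap_kernel	= ion_heap_unmap_kernel,
 *		.map_user	= ion_heap_map_user,
 *	};
 */
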
/**
 * ion_heap_init_shrinker
 * @heap:		the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op,
 * this function will be called to setup a shrinker to shrink the freelists
 * and call the heap's shrink op.
 */
void ion_heap_init_shrinker(struct ion_heap *heap);

/**
 * ion_heap_init_deferred_free -- initialize deferred free functionality
 * @heap:		the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
 * be called to setup deferred frees. Calls to free the buffer will
 * return immediately and the actual free will occur some time later.
 */
int ion_heap_init_deferred_free(struct ion_heap *heap);

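/*
 * Example (illustrative sketch, not part of this header): a heap that
 * opts into deferred free and shrinking sets the flag and initializes
 * both, checking the deferred-free return code.
 *
 *	int ret;
 *
 *	heap->flags |= ION_HEAP_FLAG_DEFER_FREE;
 *	ret = ion_heap_init_deferred_free(heap);
 *	if (ret)
 *		return ret;
 *	ion_heap_init_shrinker(heap);
 */
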
/**
 * ion_heap_freelist_add - add a buffer to the deferred free list
 * @heap:		the heap
 * @buffer:		the buffer
 *
 * Adds an item to the deferred freelist.
 */
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_freelist_drain - drain the deferred free list
 * @heap:		the heap
 * @size:		amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist
 * immediately.  Returns the total amount freed.  The total freed may be
 * higher depending on the size of the items in the list, or lower if
 * there is insufficient total memory on the freelist.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);

/**
 * ion_heap_freelist_shrink - drain the deferred free list, skipping any
 *				heap-specific pooling or caching mechanisms
 * @heap:		the heap
 * @size:		amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist
 * immediately.  Returns the total amount freed.  The total freed may be
 * higher depending on the size of the items in the list, or lower if
 * there is insufficient total memory on the freelist.
 *
 * Unlike with @ion_heap_freelist_drain, don't put any pages back into
 * page pools or otherwise cache the pages. Everything must be
 * genuinely freed back to the system. If you're freeing from a
 * shrinker you probably want to use this. Note that this relies on
 * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
 * flag.
 */
size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size);

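/*
 * Example (illustrative sketch, not part of this header): a shrink op
 * can fall back to the deferred freelist, draining up to nr_to_scan
 * pages' worth of buffers without letting them land in a pool. The
 * pages-vs-bytes conversion here is an assumption of the sketch.
 *
 *	static int my_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 *				  int nr_to_scan)
 *	{
 *		size_t freed = ion_heap_freelist_shrink(heap,
 *					nr_to_scan * PAGE_SIZE);
 *
 *		return freed / PAGE_SIZE;
 *	}
 */
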
/**
 * ion_heap_freelist_size - returns the size of the freelist in bytes
 * @heap:		the heap
 */
size_t ion_heap_freelist_size(struct ion_heap *heap);

/**
 * functions for creating and destroying the built-in ion heaps.
 * Architectures can add their own custom architecture specific
 * heaps as appropriate.
 */

struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
void ion_system_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
void ion_system_contig_heap_destroy(struct ion_heap *);

struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);

struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
void ion_chunk_heap_destroy(struct ion_heap *);

struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
void ion_cma_heap_destroy(struct ion_heap *);

/**
 * kernel api to allocate/free from carveout -- used when carveout is
 * used to back an architecture specific custom heap
 */
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
				      unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size);

/**
 * The carveout heap returns physical addresses; since 0 may be a valid
 * physical address, this value is used to indicate that an allocation
 * failed.
 */
#define ION_CARVEOUT_ALLOCATE_FAIL -1

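/*
 * Example (illustrative sketch, not part of this header): callers
 * must compare against ION_CARVEOUT_ALLOCATE_FAIL rather than 0,
 * since 0 can be a valid physical address.
 *
 *	ion_phys_addr_t paddr = ion_carveout_allocate(heap, size, align);
 *
 *	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL)
 *		return -ENOMEM;
 */
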
/**
 * functions for creating and destroying a heap pool -- allows you
 * to keep a pool of pre allocated memory to use from your heap.  Keeping
 * a pool of memory that is ready for dma, i.e. any cached mappings have
 * been invalidated from the cache, provides a significant performance
 * benefit on many systems
 */

/**
 * struct ion_page_pool - pagepool struct
 * @high_count:		number of highmem items in the pool
 * @low_count:		number of lowmem items in the pool
 * @high_items:		list of highmem items
 * @low_items:		list of lowmem items
 * @mutex:		lock protecting this struct, in particular the counts
 *			and item lists
 * @gfp_mask:		gfp_mask to use for allocations
 * @order:		order of pages in the pool
 * @list:		plist node for list of pools
 *
 * Allows you to keep a pool of pre allocated pages to use from your heap.
 * Keeping a pool of pages that is ready for dma, i.e. any cached mappings
 * have been invalidated from the cache, provides a significant performance
 * benefit on many systems
 */
struct ion_page_pool {
	int high_count;
	int low_count;
	struct list_head high_items;
	struct list_head low_items;
	struct mutex mutex;
	gfp_t gfp_mask;
	unsigned int order;
	struct plist_node list;
};

struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
void ion_page_pool_destroy(struct ion_page_pool *);
struct page *ion_page_pool_alloc(struct ion_page_pool *);
void ion_page_pool_free(struct ion_page_pool *, struct page *);
void ion_page_pool_free_immediate(struct ion_page_pool *, struct page *);

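/*
 * Example (illustrative sketch, not part of this header): a heap
 * typically keeps one pool per page order, handing pages out of the
 * pool and pushing freed pages back for reuse. The gfp mask and order
 * below are arbitrary choices for the sketch.
 *
 *	struct ion_page_pool *pool;
 *	struct page *page;
 *
 *	pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	page = ion_page_pool_alloc(pool);
 *	ion_page_pool_free(pool, page);
 *	ion_page_pool_destroy(pool);
 */
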
/*
 * When CONFIG_ION_POOL_CACHE_POLICY is set, pages handed out of the pool
 * are remapped write-combined and restored to write-back when released.
 */
#ifdef CONFIG_ION_POOL_CACHE_POLICY
static inline void ion_page_pool_alloc_set_cache_policy(struct ion_page_pool *pool,
							struct page *page)
{
	void *va = page_address(page);

	if (va)
		set_memory_wc((unsigned long)va, 1 << pool->order);
}

static inline void ion_page_pool_free_set_cache_policy(struct ion_page_pool *pool,
						       struct page *page)
{
	void *va = page_address(page);

	if (va)
		set_memory_wb((unsigned long)va, 1 << pool->order);
}
#else
static inline void ion_page_pool_alloc_set_cache_policy(struct ion_page_pool *pool,
							struct page *page)
{
}

static inline void ion_page_pool_free_set_cache_policy(struct ion_page_pool *pool,
						       struct page *page)
{
}
#endif

/**
 * ion_page_pool_shrink - shrinks the size of the memory cached in the pool
 * @pool:		the pool
 * @gfp_mask:		the memory type to reclaim
 * @nr_to_scan:		number of pages to scan for freeing
 *
 * returns the number of pages freed
 */
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
			  int nr_to_scan);

/**
 * ion_pages_sync_for_device - cache flush pages for use with the specified
 *                             device
 * @dev:		the device the pages will be used with
 * @page:		the first page to be flushed
 * @size:		size in bytes of region to be flushed
 * @dir:		direction of dma transfer
 */
void ion_pages_sync_for_device(struct device *dev, struct page *page,
		size_t size, enum dma_data_direction dir);
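
/*
 * Example (illustrative sketch, not part of this header): a heap can
 * flush freshly allocated pages before handing them to hardware.
 * Passing NULL for @dev is an assumption of this sketch, for the case
 * where no specific device is known at allocation time.
 *
 *	ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
 *				  DMA_BIDIRECTIONAL);
 */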

#endif /* _ION_PRIV_H */