/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#ifndef _ION_KERNEL_H
#define _ION_KERNEL_H

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <uapi/linux/ion.h>

/**
 * struct ion_buffer - metadata for a particular buffer
 * @list:		element in list of deferred freeable buffers
 * @heap:		back pointer to the heap the buffer came from
 * @flags:		buffer specific flags
 * @private_flags:	internal buffer specific flags
 * @size:		size of the buffer
 * @priv_virt:		private data for the buffer, representable as
 *			a void *
 * @lock:		protects the buffer's cnt fields
 * @kmap_cnt:		number of times the buffer is mapped to the kernel
 * @vaddr:		the kernel mapping if kmap_cnt is not zero
 * @sg_table:		the sg table for the buffer
 * @attachments:	list of devices attached to this buffer
 */
struct ion_buffer {
	struct list_head list;
	struct ion_heap *heap;
	unsigned long flags;
	unsigned long private_flags;
	size_t size;
	void *priv_virt;
	struct mutex lock;
	int kmap_cnt;
	void *vaddr;
	struct sg_table *sg_table;
	struct list_head attachments;
};

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:		allocate memory
 * @free:		free memory
 * @shrink:		shrink the heap's caches (e.g. page pools), returning
 *			the number of pages freed; called with nr_to_scan == 0
 *			to query how many pages could be freed
 * @get_pool_size:	get pool size in pages
 *
 * allocate returns 0 on success, -errno on error. @free will be called
 * with ION_PRIV_FLAG_SHRINKER_FREE set in the buffer's private_flags when
 * called from a shrinker. In that case, the pages being freed must be
 * truly freed back to the system, not put in a page pool or otherwise
 * cached.
 */
struct ion_heap_ops {
	int (*allocate)(struct ion_heap *heap,
			struct ion_buffer *buffer, unsigned long len,
			unsigned long flags);
	void (*free)(struct ion_buffer *buffer);
	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
	long (*get_pool_size)(struct ion_heap *heap);
};

/**
 * heap flags - flags between the heaps and core ion code
 */
#define ION_HEAP_FLAG_DEFER_FREE BIT(0)

/**
 * private flags - flags internal to ion
 */
/*
 * Buffer is being freed from a shrinker function. Skip any possible
 * heap-specific caching mechanism (e.g. page pools). Guarantees that
 * any buffer storage that came from the system allocator will be
 * returned to the system allocator.
 */
#define ION_PRIV_FLAG_SHRINKER_FREE BIT(0)

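/*
 * Illustrative sketch (not part of this header): how a heap's @free op
 * might honor ION_PRIV_FLAG_SHRINKER_FREE. The ops table, the pool helpers
 * and all names below are hypothetical.
 *
 *	static void my_heap_free(struct ion_buffer *buffer)
 *	{
 *		struct my_heap *heap = to_my_heap(buffer->heap);
 *		bool shrinker_free =
 *			buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE;
 *
 *		if (shrinker_free)
 *			my_release_to_system(heap, buffer);  (really free pages)
 *		else
 *			my_release_to_pool(heap, buffer);    (cache for reuse)
 *	}
 *
 *	static struct ion_heap_ops my_heap_ops = {
 *		.allocate = my_heap_allocate,
 *		.free = my_heap_free,
 *		.shrink = my_heap_shrink,
 *	};
 */
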
/**
 * struct ion_heap - represents a heap in the system
 * @node:		plist node to put the heap on the device's list of heaps
 * @type:		type of heap
 * @ops:		ops struct as above
 * @buf_ops:		dma_buf ops specific to the heap implementation.
 * @flags:		heap flags (e.g. ION_HEAP_FLAG_DEFER_FREE)
 * @id:			id of heap, also indicates priority of this heap when
 *			allocating.  These are specified by platform data and
 *			MUST be unique
 * @name:		used for debugging
 * @owner:		kernel module that implements this heap
 * @shrinker:		a shrinker for the heap
 * @free_list:		free list head if deferred free is used
 * @free_list_size:	size of the deferred free list in bytes
 * @free_lock:		protects the free list
 * @waitqueue:		queue to wait on from deferred free thread
 * @task:		task struct of deferred free thread
 * @num_of_buffers:	the number of currently allocated buffers
 * @num_of_alloc_bytes:	the number of allocated bytes
 * @alloc_bytes_wm:	the high watermark of allocated bytes
 * @stat_lock:		protects the heap statistics
 * @debugfs_dir:	the heap's debugfs root directory
 *
 * Represents a pool of memory from which buffers can be made.  In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
	struct plist_node node;
	enum ion_heap_type type;
	struct ion_heap_ops *ops;
	struct dma_buf_ops buf_ops;
	unsigned long flags;
	unsigned int id;
	const char *name;
	struct module *owner;

	/* deferred free support */
	struct shrinker shrinker;
	struct list_head free_list;
	size_t free_list_size;
	spinlock_t free_lock;
	wait_queue_head_t waitqueue;
	struct task_struct *task;

	/* heap statistics */
	u64 num_of_buffers;
	u64 num_of_alloc_bytes;
	u64 alloc_bytes_wm;

	/* protect heap statistics */
	spinlock_t stat_lock;

	/* heap's debugfs root */
	struct dentry *debugfs_dir;
};

#define ion_device_add_heap(heap) __ion_device_add_heap(heap, THIS_MODULE)

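/*
 * Illustrative sketch (not part of this header): declaring and registering
 * a heap from a module. The ops table and heap name are hypothetical, and
 * the heap id is left out; ion_device_add_heap() records THIS_MODULE as
 * the owner.
 *
 *	static struct ion_heap my_heap = {
 *		.name = "my_heap",
 *		.type = ION_HEAP_TYPE_CUSTOM,
 *		.ops = &my_heap_ops,
 *		.flags = ION_HEAP_FLAG_DEFER_FREE,
 *	};
 *
 *	static int __init my_heap_init(void)
 *	{
 *		return ion_device_add_heap(&my_heap);
 *	}
 *
 *	static void __exit my_heap_exit(void)
 *	{
 *		ion_device_remove_heap(&my_heap);
 *	}
 */
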
/**
 * struct ion_dma_buf_attachment - hold device-table attachment data for buffer
 * @dev:	device attached to the buffer.
 * @table:	cached mapping.
 * @list:	list of ion_dma_buf_attachment.
 * @mapped:	true while @table is mapped for DMA.
 */
struct ion_dma_buf_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
	bool mapped:1;
};

#ifdef CONFIG_ION

/**
 * __ion_device_add_heap - adds a heap to the ion device
 *
 * @heap:               the heap to add
 * @owner:              the kernel module that implements the heap; use the
 *                      ion_device_add_heap() macro to pass THIS_MODULE
 *
 * Returns 0 on success, negative error otherwise.
 */
int __ion_device_add_heap(struct ion_heap *heap, struct module *owner);

/**
 * ion_device_remove_heap - removes a heap from ion device
 *
 * @heap:		pointer to the heap to be removed
 */
void ion_device_remove_heap(struct ion_heap *heap);

/**
 * ion_heap_init_shrinker - register a shrinker for the heap
 * @heap:		the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op,
 * this function will be called to set up a shrinker that shrinks the
 * freelists and calls the heap's shrink op.
 *
 * Returns 0 on success, negative error otherwise.
 */
int ion_heap_init_shrinker(struct ion_heap *heap);

/**
 * ion_heap_init_deferred_free - initialize deferred free functionality
 * @heap:		the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag, this function will
 * be called to set up deferred frees. Calls to free the buffer will
 * return immediately and the actual free will occur some time later.
 *
 * Returns 0 on success, negative error otherwise.
 */
int ion_heap_init_deferred_free(struct ion_heap *heap);

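/*
 * Illustrative sketch (not part of this header): a heap that wants its
 * buffers freed asynchronously sets ION_HEAP_FLAG_DEFER_FREE before it is
 * registered. Whether the core calls the two init helpers above during
 * registration or the heap calls them itself depends on the ion variant;
 * the heap variable below is hypothetical.
 *
 *	my_heap.flags |= ION_HEAP_FLAG_DEFER_FREE;
 *
 *	ret = ion_heap_init_deferred_free(&my_heap);
 *	if (!ret)
 *		ret = ion_heap_init_shrinker(&my_heap);
 */
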
/**
 * ion_heap_freelist_add - add a buffer to the deferred free list
 * @heap:		the heap
 * @buffer:		the buffer
 *
 * Adds an item to the deferred freelist.
 */
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_freelist_drain - drain the deferred free list
 * @heap:		the heap
 * @size:		amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist
 * immediately. Returns the total amount freed. Because buffers are freed
 * whole, the total may be higher than @size, or lower if the freelist
 * holds less than @size bytes.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);

/**
 * ion_heap_freelist_shrink - drain the deferred free
 *				list, skipping any heap-specific
 *				pooling or caching mechanisms
 *
 * @heap:		the heap
 * @size:		amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist
 * immediately. Returns the total amount freed. Because buffers are freed
 * whole, the total may be higher than @size, or lower if the freelist
 * holds less than @size bytes.
 *
 * Unlike ion_heap_freelist_drain(), this does not put any pages back into
 * page pools or otherwise cache them; everything must be genuinely freed
 * back to the system. If you are freeing from a shrinker you probably want
 * to use this. Note that this relies on the heap.ops.free callback honoring
 * the ION_PRIV_FLAG_SHRINKER_FREE flag.
 */
size_t ion_heap_freelist_shrink(struct ion_heap *heap,
				size_t size);

/**
 * ion_heap_freelist_size - returns the size of the freelist in bytes
 * @heap:		the heap
 */
size_t ion_heap_freelist_size(struct ion_heap *heap);

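/*
 * Illustrative sketch (not part of this header): how a shrinker scan
 * callback might combine the freelist helpers with the heap's own shrink
 * op. nr_to_scan is in pages while the freelist helpers work in bytes,
 * hence the PAGE_SIZE conversions. The callback name is hypothetical.
 *
 *	static unsigned long my_shrink_scan(struct shrinker *shrinker,
 *					    struct shrink_control *sc)
 *	{
 *		struct ion_heap *heap =
 *			container_of(shrinker, struct ion_heap, shrinker);
 *		unsigned long freed = 0;
 *
 *		if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
 *			freed = ion_heap_freelist_shrink(heap,
 *					sc->nr_to_scan * PAGE_SIZE) / PAGE_SIZE;
 *
 *		if (heap->ops->shrink)
 *			freed += heap->ops->shrink(heap, sc->gfp_mask,
 *						   sc->nr_to_scan);
 *		return freed;
 *	}
 */
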
/**
 * ion_heap_map_kernel - map the ion_buffer in kernel virtual address space.
 *
 * @heap:               the heap
 * @buffer:             buffer to be mapped
 *
 * Maps the buffer using vmap(). The function respects cache flags for the
 * buffer and creates the page table entries accordingly. Returns the
 * virtual address at the beginning of the buffer, or ERR_PTR on failure.
 */
void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_unmap_kernel - unmap ion_buffer
 *
 * @buffer:             buffer to be unmapped
 *
 * ION wrapper for vunmap() of the ion buffer.
 */
void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer);

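/*
 * Illustrative sketch (not part of this header): temporarily mapping a
 * buffer to zero it from kernel space. Serialization against concurrent
 * kmap users (buffer->lock, kmap_cnt) is omitted for brevity.
 *
 *	void *vaddr = ion_heap_map_kernel(heap, buffer);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, buffer->size);
 *	ion_heap_unmap_kernel(heap, buffer);
 */
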
/**
 * ion_heap_map_user - map given ion buffer in provided vma
 *
 * @heap:               the heap this buffer belongs to
 * @buffer:             Ion buffer to be mapped
 * @vma:                vma of the process where buffer should be mapped.
 *
 * Maps the buffer using remap_pfn_range() into the given process's vma,
 * starting at vma->vm_start. The vma size is expected to be >= the ion
 * buffer size; if not, a partial buffer mapping may be created. Returns 0
 * on success.
 */
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma);

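/*
 * Illustrative sketch (not part of this header): calling ion_heap_map_user()
 * from a dma_buf .mmap callback. The callback name is hypothetical, and the
 * write-combine handling for non-cached buffers is one possible policy.
 *
 *	static int my_dma_buf_mmap(struct dma_buf *dmabuf,
 *				   struct vm_area_struct *vma)
 *	{
 *		struct ion_buffer *buffer = dmabuf->priv;
 *		int ret;
 *
 *		if (!(buffer->flags & ION_FLAG_CACHED))
 *			vma->vm_page_prot =
 *				pgprot_writecombine(vma->vm_page_prot);
 *
 *		mutex_lock(&buffer->lock);
 *		ret = ion_heap_map_user(buffer->heap, buffer, vma);
 *		mutex_unlock(&buffer->lock);
 *		return ret;
 *	}
 */
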
/**
 * ion_buffer_zero - zeroes out an ion buffer respecting the ION_FLAG_* flags
 *
 * @buffer:		ion_buffer to zero
 *
 * Returns 0 on success, negative error otherwise.
 */
int ion_buffer_zero(struct ion_buffer *buffer);

/**
 * ion_buffer_prep_noncached - flush cache before non-cached mapping
 *
 * @buffer:		ion_buffer to flush
 *
 * The memory allocated by the heap could be in the CPU cache. To map
 * this memory as non-cached, we need to flush the associated cache
 * first. Without the flush, it is possible for stale dirty cache lines
 * to be evicted after the ION client started writing into this buffer,
 * leading to data corruption.
 */
void ion_buffer_prep_noncached(struct ion_buffer *buffer);

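/*
 * Illustrative sketch (not part of this header): one way an allocation
 * path might use the two helpers above after the heap has populated the
 * buffer. Whether the heap or the core makes these calls depends on the
 * ion variant, and error unwinding is abbreviated.
 *
 *	ret = heap->ops->allocate(heap, buffer, len, flags);
 *	if (ret)
 *		return ret;
 *
 *	ret = ion_buffer_zero(buffer);
 *	if (ret)
 *		return ret;
 *
 *	if (!(buffer->flags & ION_FLAG_CACHED))
 *		ion_buffer_prep_noncached(buffer);
 */
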
/**
 * ion_alloc - Allocates an ion buffer of given size from given heap
 *
 * @len:               size of the buffer to be allocated.
 * @heap_id_mask:      a bitwise mask of heap ids to allocate from
 * @flags:             ION_FLAG_* flags for the new buffer.
 *
 * The function internally exports a dma_buf object for the new ion buffer
 * and returns it to the caller, so the buffer is ready to be used by other
 * drivers immediately. Returns ERR_PTR in case of failure.
 */
struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
			  unsigned int flags);

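/*
 * Illustrative sketch (not part of this header): allocating a buffer from
 * an in-kernel client and releasing it through the dma-buf refcount. The
 * heap id and length are hypothetical.
 *
 *	struct dma_buf *dmabuf;
 *
 *	dmabuf = ion_alloc(SZ_1M, 1 << my_heap_id, ION_FLAG_CACHED);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	(attach/map via the standard dma-buf API, then drop the reference)
 *
 *	dma_buf_put(dmabuf);
 */
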
/**
 * ion_free - Releases the ion buffer.
 *
 * @buffer:             ion buffer to be released
 */
int ion_free(struct ion_buffer *buffer);

/**
 * ion_query_heaps_kernel - Returns information about available heaps to
 * in-kernel clients.
 *
 * @hdata:             pointer to array of struct ion_heap_data.
 * @size:              size of @hdata array.
 *
 * Returns the number of available heaps and populates @hdata with
 * information about them. When invoked with @size as 0, the function will
 * return the number of available heaps without modifying @hdata. When the
 * number of available heaps is higher than @size, @size is returned instead
 * of the actual number of available heaps.
 */
size_t ion_query_heaps_kernel(struct ion_heap_data *hdata, size_t size);
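
/*
 * Illustrative sketch (not part of this header): the usual two-call
 * pattern, first querying the heap count and then fetching the table.
 * Passing NULL with @size == 0 is assumed to be accepted; the field names
 * of struct ion_heap_data come from uapi/linux/ion.h, and the
 * lookup-by-name loop is hypothetical.
 *
 *	size_t i, count = ion_query_heaps_kernel(NULL, 0);
 *	struct ion_heap_data *hdata;
 *	unsigned int heap_id_mask = 0;
 *
 *	hdata = kcalloc(count, sizeof(*hdata), GFP_KERNEL);
 *	if (!hdata)
 *		return -ENOMEM;
 *
 *	count = ion_query_heaps_kernel(hdata, count);
 *	for (i = 0; i < count; i++)
 *		if (!strcmp(hdata[i].name, "my_heap"))
 *			heap_id_mask = 1 << hdata[i].heap_id;
 *	kfree(hdata);
 */
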
#else

static inline int __ion_device_add_heap(struct ion_heap *heap,
					struct module *owner)
{
	return -ENODEV;
}

static inline int ion_heap_init_shrinker(struct ion_heap *heap)
{
	return -ENODEV;
}

static inline int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	return -ENODEV;
}

static inline void ion_heap_freelist_add(struct ion_heap *heap,
					 struct ion_buffer *buffer) {}

static inline size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	return -ENODEV;
}

static inline size_t ion_heap_freelist_shrink(struct ion_heap *heap,
					      size_t size)
{
	return -ENODEV;
}

static inline size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	return -ENODEV;
}

static inline void *ion_heap_map_kernel(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	return ERR_PTR(-ENODEV);
}

static inline void ion_heap_unmap_kernel(struct ion_heap *heap,
					 struct ion_buffer *buffer) {}

static inline int ion_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	return -ENODEV;
}

static inline int ion_buffer_zero(struct ion_buffer *buffer)
{
	return -EINVAL;
}

static inline void ion_buffer_prep_noncached(struct ion_buffer *buffer) {}

static inline struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
					unsigned int flags)
{
	return ERR_PTR(-ENOMEM);
}

static inline int ion_free(struct ion_buffer *buffer)
{
	return 0;
}

static inline size_t ion_query_heaps_kernel(struct ion_heap_data *hdata,
					    size_t size)
{
	return 0;
}
#endif /* CONFIG_ION */
#endif /* _ION_KERNEL_H */