/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#ifndef _ION_KERNEL_H
#define _ION_KERNEL_H

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <uapi/linux/ion.h>

/**
 * struct ion_buffer - metadata for a particular buffer
 * @list: element in list of deferred freeable buffers
 * @heap: back pointer to the heap the buffer came from
 * @flags: buffer specific flags
 * @private_flags: internal buffer specific flags
 * @size: size of the buffer
 * @priv_virt: private data to the buffer representable as
 * a void *
 * @lock: protects the buffer's cnt fields
 * @kmap_cnt: number of times the buffer is mapped to the kernel
 * @vaddr: the kernel mapping if kmap_cnt is not zero
 * @sg_table: the sg table for the buffer
 * @attachments: list of devices attached to this buffer
 */
struct ion_buffer {
        struct list_head list;
        struct ion_heap *heap;
        unsigned long flags;
        unsigned long private_flags;
        size_t size;
        void *priv_virt;
        struct mutex lock;
        int kmap_cnt;
        void *vaddr;
        struct sg_table *sg_table;
        struct list_head attachments;
};

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate: allocate memory
 * @free: free memory
 * @shrink: shrink the heap's page pools or other caches, if any
 *
 * allocate returns 0 on success, -errno on error.
 * @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
 * the buffer's private_flags when called from a shrinker. In that
 * case, the pages being freed must be truly freed back to the
 * system, not put in a page pool or otherwise cached.
 */
struct ion_heap_ops {
        int (*allocate)(struct ion_heap *heap,
                        struct ion_buffer *buffer, unsigned long len,
                        unsigned long flags);
        void (*free)(struct ion_buffer *buffer);
        int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
};
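
/*
 * Illustrative sketch only: a minimal ops implementation for a hypothetical
 * heap driver. The my_heap_* helpers are not part of this header; allocate
 * is expected to populate buffer->sg_table with the backing pages, and the
 * shrink op is optional.
 *
 *    static int my_heap_allocate(struct ion_heap *heap,
 *                                struct ion_buffer *buffer,
 *                                unsigned long len, unsigned long flags)
 *    {
 *            return my_heap_get_pages(buffer, len);
 *    }
 *
 *    static void my_heap_free(struct ion_buffer *buffer)
 *    {
 *            my_heap_put_pages(buffer);
 *    }
 *
 *    static struct ion_heap_ops my_heap_ops = {
 *            .allocate = my_heap_allocate,
 *            .free     = my_heap_free,
 *    };
 */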

/**
 * heap flags - flags between the heaps and core ion code
 */
#define ION_HEAP_FLAG_DEFER_FREE BIT(0)

/**
 * private flags - flags internal to ion
 */
/*
 * Buffer is being freed from a shrinker function. Skip any possible
 * heap-specific caching mechanism (e.g. page pools). Guarantees that
 * any buffer storage that came from the system allocator will be
 * returned to the system allocator.
 */
#define ION_PRIV_FLAG_SHRINKER_FREE BIT(0)
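
/*
 * Illustrative sketch only (the my_heap_* helpers are hypothetical): a free
 * op that honours ION_PRIV_FLAG_SHRINKER_FREE by bypassing its page pool
 * when the buffer is being freed from a shrinker.
 *
 *    static void my_heap_free(struct ion_buffer *buffer)
 *    {
 *            if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
 *                    my_heap_release_pages(buffer);
 *            else
 *                    my_heap_pool_pages(buffer);
 *    }
 */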

/**
 * struct ion_heap - represents a heap in the system
 * @node: plist node to put the heap on the device's prioritized list of heaps
 * @type: type of heap
 * @ops: ops struct as above
 * @buf_ops: dma_buf ops specific to the heap implementation.
 * @flags: flags
 * @id: id of heap, also indicates priority of this heap when
 * allocating. These are specified by platform data and
 * MUST be unique
 * @name: used for debugging
 * @owner: kernel module that implements this heap
 * @shrinker: a shrinker for the heap
 * @free_list: free list head if deferred free is used
 * @free_list_size: size of the deferred free list in bytes
 * @free_lock: protects the free list
 * @waitqueue: queue to wait on from deferred free thread
 * @task: task struct of deferred free thread
 * @num_of_buffers: the number of currently allocated buffers
 * @num_of_alloc_bytes: the number of allocated bytes
 * @alloc_bytes_wm: the high watermark of allocated bytes
 * @stat_lock: protects the heap statistics
 * @debugfs_dir: the heap's debugfs root directory
 *
 * Represents a pool of memory from which buffers can be made. In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
        struct plist_node node;
        enum ion_heap_type type;
        struct ion_heap_ops *ops;
        struct dma_buf_ops buf_ops;
        unsigned long flags;
        unsigned int id;
        const char *name;
        struct module *owner;

        /* deferred free support */
        struct shrinker shrinker;
        struct list_head free_list;
        size_t free_list_size;
        spinlock_t free_lock;
        wait_queue_head_t waitqueue;
        struct task_struct *task;

        /* heap statistics */
        u64 num_of_buffers;
        u64 num_of_alloc_bytes;
        u64 alloc_bytes_wm;

        /* protect heap statistics */
        spinlock_t stat_lock;

        /* heap's debugfs root */
        struct dentry *debugfs_dir;
};

#define ion_device_add_heap(heap) __ion_device_add_heap(heap, THIS_MODULE)

#ifdef CONFIG_ION

/**
 * __ion_device_add_heap - adds a heap to the ion device
 *
 * @heap: the heap to add
 *
 * Returns 0 on success, negative error otherwise.
 */
int __ion_device_add_heap(struct ion_heap *heap, struct module *owner);
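
/*
 * Registration sketch (the heap driver, its ops and the ION_HEAP_TYPE_CUSTOM
 * choice are illustrative, not mandated by this header):
 *
 *    static struct ion_heap my_heap = {
 *            .type = ION_HEAP_TYPE_CUSTOM,
 *            .ops  = &my_heap_ops,
 *            .name = "my_heap",
 *    };
 *
 *    static int __init my_heap_init(void)
 *    {
 *            return ion_device_add_heap(&my_heap);
 *    }
 */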

/**
 * ion_device_remove_heap - removes a heap from the ion device
 *
 * @heap: pointer to the heap to be removed
 */
void ion_device_remove_heap(struct ion_heap *heap);

/**
 * ion_heap_init_shrinker
 * @heap: the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op,
 * this function will be called to set up a shrinker that shrinks the freelist
 * and calls the heap's shrink op.
 */
int ion_heap_init_shrinker(struct ion_heap *heap);

/**
 * ion_heap_init_deferred_free -- initialize deferred free functionality
 * @heap: the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag, this function will
 * be called to set up deferred frees. Calls to free the buffer will
 * return immediately and the actual free will occur some time later.
 */
int ion_heap_init_deferred_free(struct ion_heap *heap);
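
/*
 * Setup sketch (illustrative; whether the core or the heap driver makes
 * these calls depends on the surrounding ion implementation): a heap that
 * wants deferred freeing and shrinker-driven reclaim.
 *
 *    heap->flags |= ION_HEAP_FLAG_DEFER_FREE;
 *
 *    ret = ion_heap_init_deferred_free(heap);
 *    if (!ret)
 *            ret = ion_heap_init_shrinker(heap);
 */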

/**
 * ion_heap_freelist_add - add a buffer to the deferred free list
 * @heap: the heap
 * @buffer: the buffer
 *
 * Adds an item to the deferred freelist.
 */
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_freelist_drain - drain the deferred free list
 * @heap: the heap
 * @size: amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist immediately.
 * Returns the total amount freed. The total freed may be higher depending
 * on the size of the items in the list, or lower if there is insufficient
 * total memory on the freelist.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);

/**
 * ion_heap_freelist_shrink - drain the deferred free
 *                            list, skipping any heap-specific
 *                            pooling or caching mechanisms
 *
 * @heap: the heap
 * @size: amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist immediately.
 * Returns the total amount freed. The total freed may be higher depending
 * on the size of the items in the list, or lower if there is insufficient
 * total memory on the freelist.
 *
 * Unlike with @ion_heap_freelist_drain, don't put any pages back into
 * page pools or otherwise cache the pages. Everything must be
 * genuinely freed back to the system. If you're freeing from a
 * shrinker you probably want to use this. Note that this relies on
 * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
 * flag.
 */
size_t ion_heap_freelist_shrink(struct ion_heap *heap,
                                size_t size);

/**
 * ion_heap_freelist_size - returns the size of the freelist in bytes
 * @heap: the heap
 */
size_t ion_heap_freelist_size(struct ion_heap *heap);
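
/*
 * Reclaim sketch (hypothetical call site): under memory pressure, push the
 * entire deferred freelist straight back to the system, bypassing any
 * heap-specific pools.
 *
 *    size_t freed;
 *
 *    freed = ion_heap_freelist_shrink(heap, ion_heap_freelist_size(heap));
 */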

/**
 * ion_heap_map_kernel - map the ion_buffer in kernel virtual address space.
 *
 * @heap: the heap
 * @buffer: buffer to be mapped
 *
 * Maps the buffer using vmap(). The function respects cache flags for the
 * buffer and creates the page table entries accordingly. Returns virtual
 * address at the beginning of the buffer or ERR_PTR.
 */
void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_unmap_kernel - unmap ion_buffer
 *
 * @heap: the heap the buffer belongs to
 * @buffer: buffer to be unmapped
 *
 * ION wrapper for vunmap() of the ion buffer.
 */
void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
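
/*
 * Usage sketch (hypothetical call site): a transient kernel mapping around
 * a CPU access to the buffer contents.
 *
 *    void *vaddr = ion_heap_map_kernel(heap, buffer);
 *
 *    if (!IS_ERR(vaddr)) {
 *            memset(vaddr, 0, buffer->size);
 *            ion_heap_unmap_kernel(heap, buffer);
 *    }
 */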

/**
 * ion_heap_map_user - map given ion buffer in provided vma
 *
 * @heap: the heap this buffer belongs to
 * @buffer: Ion buffer to be mapped
 * @vma: vma of the process where buffer should be mapped.
 *
 * Maps the buffer using remap_pfn_range() into the process's vma starting
 * at vma->vm_start. The vma size is expected to be >= the ion buffer size.
 * If not, a partial buffer mapping may be created. Returns 0 on success.
 */
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma);
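
/*
 * Sketch (hypothetical heap-side dma_buf mmap callback; how the ion_buffer
 * is recovered from the dma_buf is heap-specific):
 *
 *    static int my_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 *    {
 *            struct ion_buffer *buffer = dmabuf->priv;
 *
 *            return ion_heap_map_user(buffer->heap, buffer, vma);
 *    }
 */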

/**
 * ion_buffer_zero - zeroes out an ion buffer respecting the ION_FLAGs
 *
 * @buffer: ion_buffer to zero
 *
 * Returns 0 on success, negative error otherwise.
 */
int ion_buffer_zero(struct ion_buffer *buffer);

/**
 * ion_buffer_prep_noncached - flush cache before non-cached mapping
 *
 * @buffer: ion_buffer to flush
 *
 * The memory allocated by the heap could be in the CPU cache. To map
 * this memory as non-cached, we need to flush the associated cache
 * first. Without the flush, it is possible for stale dirty cache lines
 * to be evicted after the ION client started writing into this buffer,
 * leading to data corruption.
 */
void ion_buffer_prep_noncached(struct ion_buffer *buffer);

/**
 * ion_alloc - allocates an ion buffer of the given size from the given heaps
 *
 * @len: size of the buffer to be allocated.
 * @heap_id_mask: a bitwise mask of heap ids to allocate from
 * @flags: ION_FLAG_* flags for the new buffer.
 *
 * The function exports a dma_buf object for the new ion buffer internally
 * and returns that to the caller. So, the buffer is ready to be used by other
 * drivers immediately. Returns ERR_PTR in case of failure.
 */
struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
                          unsigned int flags);
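
/*
 * Usage sketch from an in-kernel client (the heap id, size and flags are
 * illustrative; valid heap ids depend on the heaps registered on the
 * running system):
 *
 *    struct dma_buf *dmabuf;
 *
 *    dmabuf = ion_alloc(SZ_1M, 1 << my_heap_id, ION_FLAG_CACHED);
 *    if (IS_ERR(dmabuf))
 *            return PTR_ERR(dmabuf);
 *
 *    (share dmabuf with other drivers, then drop it with dma_buf_put())
 */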

/**
 * ion_free - Releases the ion buffer.
 *
 * @buffer: ion buffer to be released
 */
int ion_free(struct ion_buffer *buffer);

#else

static inline int __ion_device_add_heap(struct ion_heap *heap,
                                        struct module *owner)
{
        return -ENODEV;
}

static inline int ion_heap_init_shrinker(struct ion_heap *heap)
{
        return -ENODEV;
}

static inline int ion_heap_init_deferred_free(struct ion_heap *heap)
{
        return -ENODEV;
}

static inline void ion_heap_freelist_add(struct ion_heap *heap,
                                         struct ion_buffer *buffer) {}

static inline size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
        return -ENODEV;
}

static inline size_t ion_heap_freelist_shrink(struct ion_heap *heap,
                                              size_t size)
{
        return -ENODEV;
}

static inline size_t ion_heap_freelist_size(struct ion_heap *heap)
{
        return -ENODEV;
}

static inline void *ion_heap_map_kernel(struct ion_heap *heap,
                                        struct ion_buffer *buffer)
{
        return ERR_PTR(-ENODEV);
}

static inline void ion_heap_unmap_kernel(struct ion_heap *heap,
                                         struct ion_buffer *buffer) {}

static inline int ion_heap_map_user(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    struct vm_area_struct *vma)
{
        return -ENODEV;
}

static inline int ion_buffer_zero(struct ion_buffer *buffer)
{
        return -EINVAL;
}

static inline void ion_buffer_prep_noncached(struct ion_buffer *buffer) {}

static inline struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
                                        unsigned int flags)
{
        return ERR_PTR(-ENOMEM);
}

static inline int ion_free(struct ion_buffer *buffer)
{
        return 0;
}

#endif /* CONFIG_ION */
#endif /* _ION_KERNEL_H */