1 /*
2  * Copyright (C) 2021 HiHope Open Source Organization .
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #ifndef _LINUX_ION_H
17 #define _LINUX_ION_H
18 
/* Version string of this ion header flavor. */
#define ION_VERSION     "1.0"

/* Opaque per-client cookie returned to userspace by ION_IOC_ALLOC and
 * passed back in later ioctls (free/map/share) to name the allocation. */
typedef int ion_user_handle_t;
22 
/**
 * enum ion_heap_type - list of all possible types of heaps
 * @ION_HEAP_TYPE_SYSTEM:    memory allocated via vmalloc
 * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
 * @ION_HEAP_TYPE_CARVEOUT:  memory allocated from a prereserved
 *               carveout heap, allocations are physically
 *               contiguous
 * @ION_HEAP_TYPE_CHUNK:    memory allocated in fixed-size chunks
 *               (presumably physically contiguous per chunk —
 *               confirm against the heap implementation)
 * @ION_HEAP_TYPE_DMA:       memory allocated via DMA API
 * @ION_HEAP_TYPE_CUSTOM:   marker for platform/vendor-specific heap
 *               types defined outside this header
 * @ION_NUM_HEAPS:       helper for iterating over heaps, a bit mask
 *               is used to identify the heaps, so only 32
 *               total heap types are supported.  NOTE(review):
 *               this platform caps the count at 16, not 32 —
 *               the value is ABI and must not be changed.
 */
enum ion_heap_type {
    ION_HEAP_TYPE_SYSTEM,
    ION_HEAP_TYPE_SYSTEM_CONTIG,
    ION_HEAP_TYPE_CARVEOUT,
    ION_HEAP_TYPE_CHUNK,
    ION_HEAP_TYPE_DMA,
    ION_HEAP_TYPE_CUSTOM,
    ION_NUM_HEAPS = 16,
};
44 
/*
 * Fixed heap ids for this platform.  The gaps (1, 3) are intentional:
 * ids are part of the user-visible ABI and must not be renumbered.
 * Presumably an id is turned into an allocation-mask bit via ION_HEAP(id)
 * — confirm against the callers.
 */
enum ion_heap_ids {
    ION_VMALLOC_HEAP_ID = 0,
    ION_CARVEOUT_HEAP_ID = 2,
    ION_CMA_HEAP_ID = 4,
    ION_SECURE_HEAP_ID = 5,
};
51 
/*
 * Convert a heap id / bit position into its mask bit.
 * Use an unsigned constant: with a plain int, ION_HEAP(31) would be
 * (1 << 31), which is signed-integer overflow — undefined behavior
 * (CERT INT34-C).  Values for bits 0..30 are numerically unchanged.
 */
#define ION_HEAP(bit) (1U << (bit))

/* Convenience masks selecting a heap by its *type* (not heap id). */
#define ION_HEAP_SYSTEM_MASK        (1U << ION_HEAP_TYPE_SYSTEM)
#define ION_HEAP_SYSTEM_CONTIG_MASK (1U << ION_HEAP_TYPE_SYSTEM_CONTIG)
#define ION_HEAP_CARVEOUT_MASK      (1U << ION_HEAP_TYPE_CARVEOUT)
#define ION_HEAP_TYPE_DMA_MASK      (1U << ION_HEAP_TYPE_DMA)
58 
59 #ifdef __KERNEL__
/* Opaque kernel-side objects; only pointers to these cross this interface. */
struct ion_device;
struct ion_heap;
struct ion_mapper;
struct ion_client;
struct ion_buffer;

/* This should be removed some day when phys_addr_t's are fully
   plumbed in the kernel, and all instances of ion_phys_addr_t should
   be converted to phys_addr_t.  For the time being many kernel interfaces
   do not accept phys_addr_t's that would have to.
   Use a typedef rather than the original object-like #define of a type
   name: it is scoped like a real type, visible to debuggers, and remains
   source-compatible for every declaration site in this header. */
typedef unsigned long ion_phys_addr_t;
71 
/**
 * struct ion_platform_heap - defines a heap in the given platform
 * @type:   type of the heap from ion_heap_type enum
 * @id:     unique identifier for heap.  When allocating lower numbers
 *      will be allocated from first
 * @name:   used for debug purposes
 * @base:   base address of heap in physical memory if applicable
 *      (meaningful for carveout-style heaps; presumably unused for
 *      vmalloc/kmalloc heaps — confirm against the heap code)
 * @size:   size of the heap in bytes if applicable
 *
 * Provided by the board file.
 */
struct ion_platform_heap {
    enum ion_heap_type type;
    unsigned int id;
    const char *name;
    ion_phys_addr_t base;
    size_t size;
};
90 
/**
 * struct ion_platform_data - array of platform heaps passed from board file
 * @nr:     number of structures in the array
 * @heaps:  array of platform_heap structures (C99 flexible array member;
 *      allocate sizeof(struct ion_platform_data) +
 *      nr * sizeof(struct ion_platform_heap))
 *
 * Provided by the board file in the form of platform data to a platform device.
 */
struct ion_platform_data {
    int nr;
    struct ion_platform_heap heaps[];
};
102 
103 /**
104  * ion_client_create() -  allocate a client and returns it
105  * @dev:    the global ion device
106  * @heap_mask:  mask of heaps this client can allocate from
107  * @name:   used for debugging
108  */
109 struct ion_client *ion_client_create(struct ion_device *dev,
110                                      unsigned int heap_mask, const char *name);
111 
112 /**
113  * ion_client_destroy() -  free's a client and all it's handles
114  * @client: the client
115  *
116  * Free the provided client and all it's resources including
117  * any handles it is holding.
118  */
119 void ion_client_destroy(struct ion_client *client);
120 
121 /**
122  * ion_alloc - allocate ion memory
123  * @client: the client
124  * @len:    size of the allocation
125  * @align:  requested allocation alignment, lots of hardware blocks have
126  *      alignment requirements of some kind
127  * @flags:  mask of heaps to allocate from, if multiple bits are set
128  *      heaps will be tried in order from lowest to highest order bit
129  *
130  * Allocate memory in one of the heaps provided in heap mask and return
131  * an opaque handle to it.
132  */
133 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
134                              size_t align, unsigned int flags);
135 
136 /**
137  * ion_free - free a handle
138  * @client: the client
139  * @handle: the handle to free
140  *
141  * Free the provided handle.
142  */
143 void ion_free(struct ion_client *client, struct ion_handle *handle);
144 
/**
 * ion_phys - returns the physical address and len of a handle
 * @client: the client
 * @handle: the handle
 * @addr:   a pointer to put the address in
 * @len:    a pointer to put the length in
 *
 * This function queries the heap for a particular handle to get the
 * handle's physical address.  Its output is only correct if
 * a heap returns physically contiguous memory -- in other cases
 * this api should not be implemented -- ion_map_dma should be used
 * instead.  Returns -EINVAL if the handle is invalid.  This has
 * no implications on the reference counting of the handle --
 * the returned value may not be valid if the caller is not
 * holding a reference.
 */
int ion_phys(struct ion_client *client, struct ion_handle *handle,
             ion_phys_addr_t *addr, size_t *len);

/**
 * ion_map_kernel - create mapping for the given handle
 * @client: the client
 * @handle: handle to map
 *
 * Map the given handle into the kernel and return a kernel address that
 * can be used to access this address.
 */
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_unmap_kernel() - destroy a kernel mapping for a handle
 * @client: the client
 * @handle: handle to unmap
 */
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_map_dma - create a dma mapping for a given handle
 * @client: the client
 * @handle: handle to map
 *
 * Return an sglist describing the given handle
 */
struct scatterlist *ion_map_dma(struct ion_client *client,
                                struct ion_handle *handle);

/**
 * ion_unmap_dma() - destroy a dma mapping for a handle
 * @client: the client
 * @handle: handle to unmap
 */
void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle);
197 
/**
 * ion_share() - given a handle, obtain a buffer to pass to other clients
 * @client: the client
 * @handle: the handle to share
 *
 * Given a handle, return a buffer, which exists in a global name
 * space, and can be passed to other clients.  Should be passed into ion_import
 * to obtain a new handle for this buffer.
 *
 * NOTE: This function does not take an extra reference.  The burden is on the
 * caller to make sure the buffer doesn't go away while it's being passed to
 * another client.  That is, ion_free should not be called on this handle until
 * the buffer has been imported into the other client.
 */
struct ion_buffer *ion_share(struct ion_client *client,
                             struct ion_handle *handle);

/**
 * ion_import() - given a buffer in another client, import it
 * @client: the importing client
 * @buffer: the buffer to import (as obtained from ion_share)
 *
 * Given a buffer, add it to the client and return the handle to use to refer
 * to it further.  This is called to share a handle from one kernel client to
 * another.
 */
struct ion_handle *ion_import(struct ion_client *client,
                              struct ion_buffer *buffer);

/**
 * ion_import_fd() - given an fd obtained via ION_IOC_SHARE ioctl, import it
 * @client: the importing client
 * @fd:     the fd
 *
 * A helper function for drivers that will be receiving ion buffers shared
 * with them from userspace.  These buffers are represented by a file
 * descriptor obtained as the return from the ION_IOC_SHARE ioctl.
 * This function converts that fd into the underlying buffer, and returns
 * the handle to use to refer to it further.
 */
struct ion_handle *ion_import_fd(struct ion_client *client, int fd);
239 #endif /* __KERNEL__ */
240 
241 /**
242  * DOC: Ion Userspace API
243  *
244  * create a client by opening /dev/ion
245  * most operations handled via following ioctls
246  *
247  */
248 
/**
 * struct ion_allocation_data - metadata passed from userspace for allocations
 * @len:    size of the allocation
 * @align:  required alignment of the allocation
 * @heap_id_mask: mask of heap ids to allocate from
 * @flags:  flags passed to heap
 * @handle: cookie filled in by the kernel to refer to this allocation
 *      in later ioctls (not a pointer — ion_user_handle_t is an int)
 *
 * Provided by userspace as an argument to the ioctl.
 * NOTE(review): size_t fields in a userspace ABI struct differ in width
 * between 32- and 64-bit userspace — confirm no compat handling is needed.
 */
struct ion_allocation_data {
    size_t len;
    size_t align;
    unsigned int heap_id_mask;
    unsigned int flags;
    ion_user_handle_t handle;
};
266 
/**
 * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
 * @handle: a handle
 * @fd:     a file descriptor representing that handle
 *
 * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
 * the handle returned from ion alloc, and the kernel returns the file
 * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace
 * provides the file descriptor and the kernel returns the handle.
 */
struct ion_fd_data {
    ion_user_handle_t handle;   /* in (SHARE/MAP) or out (IMPORT) */
    int fd;                     /* out (SHARE/MAP) or in (IMPORT) */
};
281 
/**
 * struct ion_handle_data - a handle passed to/from the kernel
 * @handle: a handle (as returned by ION_IOC_ALLOC; used by ION_IOC_FREE)
 */
struct ion_handle_data {
    ion_user_handle_t handle;
};
289 
/**
 * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
 * @cmd:    the custom ioctl function to call
 * @arg:    additional data to pass to the custom ioctl, typically a user
 *      pointer to a predefined structure
 *
 * This works just like the regular cmd and arg fields of an ioctl.
 */
struct ion_custom_data {
    unsigned int cmd;
    unsigned long arg;
};
302 
/* struct ion_flush_data - data passed to ion for flushing caches
 *
 * @handle: handle with data to flush
 * @fd:     fd to flush
 * @vaddr:  userspace virtual address mapped with mmap
 * @offset: offset into the handle to flush
 * @length: length of handle to flush
 *
 * Performs cache operations on the handle. If p is the start address
 * of the handle, p + offset through p + offset + length will have
 * the cache operations performed
 */
struct ion_flush_data {
    ion_user_handle_t handle;
    int fd;
    void *vaddr;
    unsigned int offset;    /* 32-bit: ranges are limited to < 4 GiB */
    unsigned int length;
};
322 
/* Not available in the new ion kernel; legacy structures kept for the
 * platform-specific ioctls below. */
struct ion_phys_data {
    ion_user_handle_t handle;   /* in: handle to query */
    unsigned long phys;         /* out: physical address */
    unsigned long size;         /* out: buffer size in bytes */
};

/* Argument for the legacy cache-maintenance ioctl. */
struct ion_cacheop_data {
#define ION_CACHE_FLUSH     0
#define ION_CACHE_CLEAN     1
#define ION_CACHE_INV       2
    unsigned int type;          /* one of the ION_CACHE_* ops above */
    struct ion_handle *handle;  /* NOTE(review): kernel pointer in a struct
                                 * shared with userspace — confirm intent */
    void *virt;
};
/* Physical address/size pair describing one buffer. */
struct ion_buffer_info {
    unsigned long phys;
    unsigned long size;
};
/* Per-client accounting snapshot: up to MAX_BUFFER_COUNT buffers. */
struct ion_client_info {
#define MAX_BUFFER_COUNT    127
    unsigned int count;         /* number of valid entries in buf[] */
    unsigned long total_size;   /* sum of buffer sizes in bytes */
    struct ion_buffer_info buf[MAX_BUFFER_COUNT];
};
/* Per-heap accounting snapshot. */
struct ion_heap_info {
    unsigned int id;            /* heap id, see enum ion_heap_ids */
    unsigned long allocated_size;
    unsigned long max_allocated;    /* high-water mark */
    unsigned long total_size;
};

/* fd/object pair exchanged by ION_IOC_GET_SHARE / ION_IOC_SET_SHARE. */
struct ion_share_obj_data {
    int fd;
    void *obj;  /* NOTE(review): opaque share object — semantics defined
                 * by the platform ion driver; confirm before use */
};
359 ///////////////////////////////////////////////////
360 
/* ioctl type ('magic') byte for the core ion commands below. */
#define ION_IOC_MAGIC       'I'

/**
 * DOC: ION_IOC_ALLOC - allocate memory
 *
 * Takes an ion_allocation_data struct and returns it with the handle field
 * populated with the opaque handle for the allocation.
 */
#define ION_IOC_ALLOC       _IOWR(ION_IOC_MAGIC, 0, \
                      struct ion_allocation_data)

/**
 * DOC: ION_IOC_FREE - free memory
 *
 * Takes an ion_handle_data struct and frees the handle.
 */
#define ION_IOC_FREE        _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)

/**
 * DOC: ION_IOC_MAP - get a file descriptor to mmap
 *
 * Takes an ion_fd_data struct with the handle field populated with a valid
 * opaque handle.  Returns the struct with the fd field set to a file
 * descriptor open in the current address space.  This file descriptor
 * can then be used as an argument to mmap.
 */
#define ION_IOC_MAP     _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)

/**
 * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
 *
 * Takes an ion_fd_data struct with the handle field populated with a valid
 * opaque handle.  Returns the struct with the fd field set to a file
 * descriptor open in the current address space.  This file descriptor
 * can then be passed to another process.  The corresponding opaque handle can
 * be retrieved via ION_IOC_IMPORT.
 * (Command numbers 3 is unused here; numbers are ABI and must not change.)
 */
#define ION_IOC_SHARE       _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)

/**
 * DOC: ION_IOC_IMPORT - imports a shared file descriptor
 *
 * Takes an ion_fd_data struct with the fd field populated with a valid file
 * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
 * field set to the corresponding opaque handle.
 */
#define ION_IOC_IMPORT      _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)

/**
 * DOC: ION_IOC_SYNC - sync a shared file descriptor to memory
 *
 * Deprecated in favor of using the dma_buf api's correctly (syncing
 * will happen automatically when the buffer is mapped to a device).
 * If necessary should be used after touching a cached buffer from the cpu,
 * this will make the buffer in memory coherent.
 */
#define ION_IOC_SYNC        _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)

/**
 * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
 *
 * Takes the argument of the architecture specific ioctl to call and
 * passes appropriate userdata for that ioctl
 */
#define ION_IOC_CUSTOM          _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
426 
427 
#if 1
/* ioctl type ('magic') byte for the Rockchip-specific commands. */
#define ION_IOC_ROCKCHIP_MAGIC 'R'

/**
 * Clean the caches of the handle specified.
 */
#define ION_IOC_CLEAN_CACHES    _IOWR(ION_IOC_ROCKCHIP_MAGIC, 0, \
                        struct ion_flush_data)
/**
 * Invalidate the caches of the handle specified.
 */
#define ION_IOC_INV_CACHES  _IOWR(ION_IOC_ROCKCHIP_MAGIC, 1, \
                        struct ion_flush_data)
/**
 * Clean and invalidate the caches of the handle specified.
 */
#define ION_IOC_CLEAN_INV_CACHES    _IOWR(ION_IOC_ROCKCHIP_MAGIC, 2, \
                        struct ion_flush_data)

/**
 * Get phys addr of the handle specified.
 */
#define ION_IOC_GET_PHYS    _IOWR(ION_IOC_ROCKCHIP_MAGIC, 3, \
                        struct ion_phys_data)

/*
 * NOTE(review): a second, token-identical set of definitions of the three
 * cache ioctls (plus a stray "AUTO-GENERATED" banner) used to follow here.
 * Redefining a macro with an identical replacement list is legal C but pure
 * noise, so the duplicates were removed; the expansions are unchanged.
 */

/* Exchange a share object with the driver for the given fd.
 * Semantics of ->obj are defined by the Rockchip ion driver — confirm there. */
#define ION_IOC_GET_SHARE _IOWR(ION_IOC_ROCKCHIP_MAGIC, 4, struct ion_share_obj_data)
#define ION_IOC_SET_SHARE _IOWR(ION_IOC_ROCKCHIP_MAGIC, 5, struct ion_share_obj_data)

#else
/* Not available in the new ion kernel. */
#define ION_CUSTOM_GET_PHYS     _IOWR(ION_IOC_MAGIC, 15, \
                            struct ion_phys_data)

#define ION_CUSTOM_CACHE_OP     _IOWR(ION_IOC_MAGIC, 8, \
                            struct ion_cacheop_data)

#define ION_CUSTOM_GET_CLIENT_INFO  _IOWR(ION_IOC_MAGIC, 9, \
                            struct ion_client_info)

#define ION_CUSTOM_GET_HEAP_INFO    _IOWR(ION_IOC_MAGIC, 10, \
                            struct ion_heap_info)
/* Compatible with pmem */
struct ion_pmem_region {
    unsigned long offset;
    unsigned long len;
};
#define ION_PMEM_GET_PHYS       _IOW('p', 1, unsigned int)
#define ION_PMEM_CACHE_FLUSH        _IOW('p', 8, unsigned int)
#endif
482 ///////////////////////////////////////////
483 
484 #endif /* _LINUX_ION_H */
485