#ifndef _LINUX_MSM_ION_H
#define _LINUX_MSM_ION_H

#include <linux/ion.h>

enum msm_ion_heap_types {
	ION_HEAP_TYPE_MSM_START = ION_HEAP_TYPE_CUSTOM + 1,
	ION_HEAP_TYPE_IOMMU = ION_HEAP_TYPE_MSM_START,
	ION_HEAP_TYPE_DMA,
	ION_HEAP_TYPE_CP,
	ION_HEAP_TYPE_SECURE_DMA,
	ION_HEAP_TYPE_REMOVED,
};

/**
 * These are the only IDs that should be used for Ion heap IDs.
 * The IDs listed are the order in which allocation will be attempted,
 * if specified. Don't swap the order of heap IDs unless you know what
 * you are doing!
 * IDs are spaced apart on purpose to allow new IDs to be inserted
 * in between (for possible fallbacks).
 */

enum ion_heap_ids {
	INVALID_HEAP_ID = -1,
	ION_CP_MM_HEAP_ID = 8,
	ION_CP_MFC_HEAP_ID = 12,
	ION_CP_WB_HEAP_ID = 16, /* 8660 only */
	ION_CAMERA_HEAP_ID = 20, /* 8660 only */
	ION_SYSTEM_CONTIG_HEAP_ID = 21,
	ION_ADSP_HEAP_ID = 22,
	ION_PIL1_HEAP_ID = 23, /* Currently used for other PIL images */
	ION_SF_HEAP_ID = 24,
	ION_IOMMU_HEAP_ID = 25,
	ION_PIL2_HEAP_ID = 26, /* Currently used for modem firmware images */
	ION_QSECOM_HEAP_ID = 27,
	ION_AUDIO_HEAP_ID = 28,

	ION_MM_FIRMWARE_HEAP_ID = 29,
	ION_SYSTEM_HEAP_ID = 30,

	ION_HEAP_ID_RESERVED = 31 /* bit reserved for the ION_FLAG_SECURE flag */
};

enum ion_fixed_position {
	NOT_FIXED,
	FIXED_LOW,
	FIXED_MIDDLE,
	FIXED_HIGH,
};

enum cp_mem_usage {
	VIDEO_BITSTREAM = 0x1,
	VIDEO_PIXEL = 0x2,
	VIDEO_NONPIXEL = 0x3,
	MAX_USAGE = 0x4,
	UNKNOWN = 0x7FFFFFFF,
};

#define ION_HEAP_CP_MASK	(1 << ION_HEAP_TYPE_CP)
#define ION_HEAP_TYPE_DMA_MASK	(1 << ION_HEAP_TYPE_DMA)

/**
 * Flag to use when allocating to indicate that a heap is secure.
 */
#define ION_FLAG_SECURE (1 << ION_HEAP_ID_RESERVED)

/**
 * Flag for clients to force a contiguous memory allocation.
 *
 * Use of this flag is carefully monitored!
 */
#define ION_FLAG_FORCE_CONTIGUOUS (1 << 30)

/*
 * Used in conjunction with heaps that pool memory, to force an allocation
 * to come from the page allocator directly instead of from the pool.
 */
#define ION_FLAG_POOL_FORCE_ALLOC (1 << 16)

/**
 * Deprecated! Please use the corresponding ION_FLAG_*
 */
#define ION_SECURE ION_FLAG_SECURE
#define ION_FORCE_CONTIGUOUS ION_FLAG_FORCE_CONTIGUOUS

/**
 * This macro should be used with the ion_heap_ids defined above.
 */
#define ION_HEAP(bit) (1 << (bit))
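
/*
 * Example (illustrative sketch, not part of the original header): building
 * an allocation mask from the heap IDs above. Because allocation is
 * attempted in the ID order documented above, OR-ing several ION_HEAP()
 * bits expresses a preference with fallback:
 *
 *	unsigned int heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID) |
 *				 ION_HEAP(ION_SYSTEM_HEAP_ID);
 */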

#define ION_ADSP_HEAP_NAME	"adsp"
#define ION_VMALLOC_HEAP_NAME	"vmalloc"
#define ION_KMALLOC_HEAP_NAME	"kmalloc"
#define ION_AUDIO_HEAP_NAME	"audio"
#define ION_SF_HEAP_NAME	"sf"
#define ION_MM_HEAP_NAME	"mm"
#define ION_CAMERA_HEAP_NAME	"camera_preview"
#define ION_IOMMU_HEAP_NAME	"iommu"
#define ION_MFC_HEAP_NAME	"mfc"
#define ION_WB_HEAP_NAME	"wb"
#define ION_MM_FIRMWARE_HEAP_NAME	"mm_fw"
#define ION_PIL1_HEAP_NAME	"pil_1"
#define ION_PIL2_HEAP_NAME	"pil_2"
#define ION_QSECOM_HEAP_NAME	"qsecom"

#define ION_SET_CACHED(__cache)		((__cache) | ION_FLAG_CACHED)
#define ION_SET_UNCACHED(__cache)	((__cache) & ~ION_FLAG_CACHED)

#define ION_IS_CACHED(__flags)	((__flags) & ION_FLAG_CACHED)
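
/*
 * Example (illustrative): the helpers above operate on the same flags word
 * later passed to the allocator, assuming ION_FLAG_CACHED from
 * <linux/ion.h>:
 *
 *	unsigned long flags = 0;
 *
 *	flags = ION_SET_CACHED(flags);		// request a cached mapping
 *	if (ION_IS_CACHED(flags))
 *		flags = ION_SET_UNCACHED(flags); // ...and clear it again
 */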

#ifdef __KERNEL__

/*
 * This flag allows clients, when mapping into the IOMMU, to defer
 * unmapping from the IOMMU until the buffer memory is freed.
 */
#define ION_IOMMU_UNMAP_DELAYED 1

/*
 * This flag allows clients to defer unsecuring a buffer until the buffer
 * is actually freed.
 */
#define ION_UNSECURE_DELAYED	1

/**
 * struct ion_cp_heap_pdata - defines a content protection heap in the given
 * platform
 * @permission_type:	Memory ID used to identify the memory to TZ
 * @align:		Alignment requirement for the memory
 * @secure_base:	Base address for securing the heap.
 *			Note: This might be different from the actual base
 *			address of this heap in the case of a shared heap.
 * @secure_size:	Memory size for securing the heap.
 *			Note: This might be different from the actual size
 *			of this heap in the case of a shared heap.
 * @is_cma:		Indicates whether the heap memory is backed by CMA.
 * @fixed_position:	If nonzero, position in the fixed area.
 * @iommu_map_all:	Indicates whether we should map the whole heap into
 *			the IOMMU.
 * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
 * @request_region:	Function to be called when the number of allocations
 *			goes from 0 -> 1.
 * @release_region:	Function to be called when the number of allocations
 *			goes from 1 -> 0.
 * @setup_region:	Function to be called upon ion registration.
 * @memory_type:	Memory type used for the heap.
 * @allow_nonsecure_alloc: Allow non-secure allocations from this heap. For
 *			secure heaps, this flag must be set to allow non-secure
 *			allocations. For non-secure heaps, this flag is ignored.
 */
struct ion_cp_heap_pdata {
	enum ion_permission_type permission_type;
	unsigned int align;
	ion_phys_addr_t secure_base; /* Base addr used when heap is shared */
	size_t secure_size; /* Size used for securing heap when heap is shared */
	int is_cma;
	enum ion_fixed_position fixed_position;
	int iommu_map_all;
	int iommu_2x_map_domain;
	int (*request_region)(void *);
	int (*release_region)(void *);
	void *(*setup_region)(void);
	enum ion_memory_types memory_type;
	int allow_nonsecure_alloc;
};

/**
 * struct ion_co_heap_pdata - defines a carveout heap in the given platform
 * @adjacent_mem_id:	ID of the heap that this heap must be adjacent to.
 * @align:		Alignment requirement for the memory
 * @fixed_position:	If nonzero, position in the fixed area.
 * @request_region:	Function to be called when the number of allocations
 *			goes from 0 -> 1.
 * @release_region:	Function to be called when the number of allocations
 *			goes from 1 -> 0.
 * @setup_region:	Function to be called upon ion registration.
 * @memory_type:	Memory type used for the heap.
 */
struct ion_co_heap_pdata {
	int adjacent_mem_id;
	unsigned int align;
	enum ion_fixed_position fixed_position;
	int (*request_region)(void *);
	int (*release_region)(void *);
	void *(*setup_region)(void);
	enum ion_memory_types memory_type;
};
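
/*
 * Example (hypothetical board-file snippet, not from this header): wiring a
 * carveout heap up through ion_co_heap_pdata. ION_EBI_TYPE is assumed to
 * come from enum ion_memory_types in this tree's ion headers; the variable
 * name is illustrative:
 *
 *	static struct ion_co_heap_pdata co_ion_pdata = {
 *		.adjacent_mem_id = INVALID_HEAP_ID,
 *		.align = SZ_4K,
 *		.fixed_position = NOT_FIXED,
 *		.memory_type = ION_EBI_TYPE,
 *	};
 */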

#ifdef CONFIG_ION
/**
 * msm_ion_client_create - allocate a client using the ion_device specified in
 *				drivers/gpu/ion/msm/msm_ion.c
 *
 * The heap_mask and name arguments, and the return values, are the same as
 * for ion_client_create().
 */
struct ion_client *msm_ion_client_create(unsigned int heap_mask,
					const char *name);
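
/*
 * Usage sketch (illustrative; assumes the ion_alloc()/ion_free() prototypes
 * from this tree's <linux/ion.h>, i.e. length, align, heap mask, flags):
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *
 *	client = msm_ion_client_create(ION_HEAP(ION_SYSTEM_HEAP_ID), "mydrv");
 *	if (IS_ERR_OR_NULL(client))
 *		return PTR_ERR(client);
 *	handle = ion_alloc(client, SZ_4K, SZ_4K,
 *			   ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
 *	...
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */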

/**
 * ion_handle_get_flags - get the flags for a given handle
 *
 * @client - client who allocated the handle
 * @handle - handle to get the flags for
 * @flags - pointer to store the flags
 *
 * Gets the current flags for a handle. These flags indicate various options
 * of the buffer (caching, security, etc.)
 */
int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
				unsigned long *flags);
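
/*
 * Example (illustrative): querying a buffer's flags to choose a mapping
 * attribute:
 *
 *	unsigned long flags;
 *
 *	if (!ion_handle_get_flags(client, handle, &flags) &&
 *	    ION_IS_CACHED(flags))
 *		pr_debug("buffer is cached\n");
 */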


/**
 * ion_map_iommu - map the given handle into an iommu
 *
 * @client - client who allocated the handle
 * @handle - handle to map
 * @domain_num - domain number to map to
 * @partition_num - partition number to allocate iova from
 * @align - alignment for the iova
 * @iova_length - length of iova to map. If the iova length is
 *		greater than the handle length, the remaining
 *		address space will be mapped to a dummy buffer.
 * @iova - pointer to store the iova address
 * @buffer_size - pointer to store the size of the buffer
 * @flags - flags for options to map
 * @iommu_flags - flags specific to the iommu.
 *
 * Maps the handle into the iova space specified via domain number. Iova
 * will be allocated from the partition specified via partition_num.
 * Returns 0 on success, negative value on error.
 */
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags);


/**
 * ion_handle_get_size - get the allocated size of a given handle
 *
 * @client - client who allocated the handle
 * @handle - handle to get the size of
 * @size - pointer to store the size
 *
 * Gives the allocated size of a handle. Returns 0 on success, negative
 * value on error.
 *
 * NOTE: This is intended to be used only to get a size to pass to map_iommu.
 * You should *NOT* rely on this for any other usage.
 */
int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size);

/**
 * ion_unmap_iommu - unmap the handle from an iommu
 *
 * @client - client who allocated the handle
 * @handle - handle to unmap
 * @domain_num - domain to unmap from
 * @partition_num - partition to unmap from
 *
 * Decrement the reference count on the iommu mapping. If the count is
 * 0, the mapping will be removed from the iommu.
 */
void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num);
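
/*
 * Putting the three calls above together (illustrative sketch; domain_num
 * and partition_num are platform-specific placeholders):
 *
 *	unsigned long iova, buffer_size, size;
 *	int rc;
 *
 *	rc = ion_handle_get_size(client, handle, &size);
 *	if (rc)
 *		return rc;
 *	rc = ion_map_iommu(client, handle, domain_num, partition_num,
 *			   SZ_4K, size, &iova, &buffer_size, 0, 0);
 *	if (rc)
 *		return rc;
 *	... device uses [iova, iova + buffer_size) ...
 *	ion_unmap_iommu(client, handle, domain_num, partition_num);
 */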


/**
 * ion_secure_heap - secure a heap
 *
 * @dev - ion device containing the heap
 * @heap_id - heap id to secure.
 * @version - version of content protection
 * @data - extra data needed for protection
 *
 * Secures a heap.
 * Returns 0 on success.
 */
int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
			void *data);

/**
 * ion_unsecure_heap - un-secure a heap
 *
 * @dev - ion device containing the heap
 * @heap_id - heap id to un-secure.
 * @version - version of content protection
 * @data - extra data needed for protection
 *
 * Un-secures a heap.
 * Returns 0 on success.
 */
int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
			void *data);

/**
 * msm_ion_do_cache_op - do cache operations.
 *
 * @client - pointer to ION client.
 * @handle - pointer to buffer handle.
 * @vaddr - virtual address to operate on.
 * @len - length of data to do cache operation on.
 * @cmd - cache operation to perform:
 *		ION_IOC_CLEAN_CACHES
 *		ION_IOC_INV_CACHES
 *		ION_IOC_CLEAN_INV_CACHES
 *
 * Returns 0 on success.
 */
int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *vaddr, unsigned long len, unsigned int cmd);
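
/*
 * Example (illustrative): cleaning the cache for a kernel mapping before
 * handing the buffer to a device, assuming vaddr came from ion_map_kernel():
 *
 *	int rc;
 *
 *	rc = msm_ion_do_cache_op(client, handle, vaddr, len,
 *				 ION_IOC_CLEAN_CACHES);
 *	if (rc)
 *		pr_err("cache clean failed: %d\n", rc);
 */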

/**
 * msm_ion_secure_heap - secure a heap. Wrapper around ion_secure_heap.
 *
 * @heap_id - heap id to secure.
 *
 * Secures a heap.
 * Returns 0 on success.
 */
int msm_ion_secure_heap(int heap_id);

/**
 * msm_ion_unsecure_heap - unsecure a heap. Wrapper around ion_unsecure_heap.
 *
 * @heap_id - heap id to un-secure.
 *
 * Un-secures a heap.
 * Returns 0 on success.
 */
int msm_ion_unsecure_heap(int heap_id);

/**
 * msm_ion_secure_heap_2_0 - secure a heap using the 2.0 APIs.
 * Wrapper around ion_secure_heap.
 *
 * @heap_id - heap id to secure.
 * @usage - usage hint to TZ
 *
 * Secures a heap.
 * Returns 0 on success.
 */
int msm_ion_secure_heap_2_0(int heap_id, enum cp_mem_usage usage);

/**
 * msm_ion_unsecure_heap_2_0 - unsecure a heap secured with the 2.0 APIs.
 * Wrapper around ion_unsecure_heap.
 *
 * @heap_id - heap id to un-secure.
 * @usage - usage hint to TZ
 *
 * Un-secures a heap.
 * Returns 0 on success.
 */
int msm_ion_unsecure_heap_2_0(int heap_id, enum cp_mem_usage usage);
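
/*
 * Example (illustrative): protecting the multimedia heap for secure video
 * playback with the 2.0 content-protection APIs, then releasing it:
 *
 *	if (!msm_ion_secure_heap_2_0(ION_CP_MM_HEAP_ID, VIDEO_PIXEL)) {
 *		... decode protected content ...
 *		msm_ion_unsecure_heap_2_0(ION_CP_MM_HEAP_ID, VIDEO_PIXEL);
 *	}
 */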

/**
 * msm_ion_secure_buffer - secure an individual buffer
 *
 * @client - client who has access to the buffer
 * @handle - buffer to secure
 * @usage - usage hint to TZ
 * @flags - flags for the securing
 */
int msm_ion_secure_buffer(struct ion_client *client, struct ion_handle *handle,
				enum cp_mem_usage usage, int flags);

/**
 * msm_ion_unsecure_buffer - unsecure an individual buffer
 *
 * @client - client who has access to the buffer
 * @handle - buffer to unsecure
 */
int msm_ion_unsecure_buffer(struct ion_client *client,
				struct ion_handle *handle);
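
/*
 * Example (illustrative): securing a single buffer rather than a whole heap.
 * The flags argument is passed through to the securing operation; 0 requests
 * the default behaviour:
 *
 *	int rc;
 *
 *	rc = msm_ion_secure_buffer(client, handle, VIDEO_PIXEL, 0);
 *	if (!rc) {
 *		... device uses the protected buffer ...
 *		msm_ion_unsecure_buffer(client, handle);
 *	}
 */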
#else
static inline struct ion_client *msm_ion_client_create(unsigned int heap_mask,
					const char *name)
{
	return ERR_PTR(-ENODEV);
}

static inline int ion_map_iommu(struct ion_client *client,
			struct ion_handle *handle, int domain_num,
			int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags,
			unsigned long iommu_flags)
{
	return -ENODEV;
}

static inline int ion_handle_get_size(struct ion_client *client,
				struct ion_handle *handle, unsigned long *size)
{
	return -ENODEV;
}

static inline void ion_unmap_iommu(struct ion_client *client,
			struct ion_handle *handle, int domain_num,
			int partition_num)
{
}

static inline int ion_secure_heap(struct ion_device *dev, int heap_id,
					int version, void *data)
{
	return -ENODEV;
}

static inline int ion_unsecure_heap(struct ion_device *dev, int heap_id,
					int version, void *data)
{
	return -ENODEV;
}

static inline void ion_mark_dangling_buffers_locked(struct ion_device *dev)
{
}

static inline int msm_ion_do_cache_op(struct ion_client *client,
			struct ion_handle *handle, void *vaddr,
			unsigned long len, unsigned int cmd)
{
	return -ENODEV;
}

static inline int msm_ion_secure_heap(int heap_id)
{
	return -ENODEV;
}

static inline int msm_ion_unsecure_heap(int heap_id)
{
	return -ENODEV;
}

static inline int msm_ion_secure_heap_2_0(int heap_id, enum cp_mem_usage usage)
{
	return -ENODEV;
}

static inline int msm_ion_unsecure_heap_2_0(int heap_id,
					enum cp_mem_usage usage)
{
	return -ENODEV;
}

static inline int msm_ion_secure_buffer(struct ion_client *client,
					struct ion_handle *handle,
					enum cp_mem_usage usage,
					int flags)
{
	return -ENODEV;
}

static inline int msm_ion_unsecure_buffer(struct ion_client *client,
					struct ion_handle *handle)
{
	return -ENODEV;
}
#endif /* CONFIG_ION */

#endif /* __KERNEL__ */

/**
 * struct ion_flush_data - data passed to ion for flushing caches
 *
 * @handle:	handle with data to flush
 * @fd:		fd to flush
 * @vaddr:	userspace virtual address mapped with mmap
 * @offset:	offset into the handle to flush
 * @length:	length of handle to flush
 *
 * Performs cache operations on the handle. If p is the start address
 * of the handle, p + offset through p + offset + length will have
 * the cache operations performed.
 */
struct ion_flush_data {
	struct ion_handle *handle;
	int fd;
	void *vaddr;
	unsigned int offset;
	unsigned int length;
};

#define ION_IOC_MSM_MAGIC 'M'

/**
 * DOC: ION_IOC_CLEAN_CACHES - clean the caches
 *
 * Clean the caches of the handle specified.
 */
#define ION_IOC_CLEAN_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 0, \
						struct ion_flush_data)
/**
 * DOC: ION_IOC_INV_CACHES - invalidate the caches
 *
 * Invalidate the caches of the handle specified.
 */
#define ION_IOC_INV_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 1, \
						struct ion_flush_data)
/**
 * DOC: ION_IOC_CLEAN_INV_CACHES - clean and invalidate the caches
 *
 * Clean and invalidate the caches of the handle specified.
 */
#define ION_IOC_CLEAN_INV_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 2, \
						struct ion_flush_data)
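
/*
 * Userspace sketch (illustrative): flushing a mapped buffer with the ioctls
 * above. On MSM kernels of this vintage these commands are typically routed
 * through ion's ION_IOC_CUSTOM hook; a direct dispatch is shown here for
 * brevity and may need wrapping depending on the kernel. The handle is the
 * opaque value returned by ION_IOC_ALLOC:
 *
 *	struct ion_flush_data flush = {
 *		.handle = alloc_data.handle,
 *		.fd = buf_fd,
 *		.vaddr = mapped,
 *		.offset = 0,
 *		.length = buf_len,
 *	};
 *
 *	if (ioctl(ion_fd, ION_IOC_CLEAN_INV_CACHES, &flush) < 0)
 *		perror("ION_IOC_CLEAN_INV_CACHES");
 */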

#endif /* _LINUX_MSM_ION_H */