/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __DRM_GEM_SHMEM_HELPER_H__
#define __DRM_GEM_SHMEM_HELPER_H__

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mutex.h>

#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

struct dma_buf_attachment;
struct drm_mode_create_dumb;
struct drm_printer;
struct sg_table;

/**
 * struct drm_gem_shmem_object - GEM object backed by shmem
 */
struct drm_gem_shmem_object {
	/**
	 * @base: Base GEM object
	 */
	struct drm_gem_object base;

	/**
	 * @pages_lock: Protects the page table and use count
	 */
	struct mutex pages_lock;

	/**
	 * @pages: Page table
	 */
	struct page **pages;

	/**
	 * @pages_use_count:
	 *
	 * Reference count on the pages table.
	 * The pages are put when the count reaches zero.
	 */
	unsigned int pages_use_count;

	/**
	 * @madv: State for madvise
	 *
	 * 0 is active/inuse.
	 * A negative value means the object is purged.
	 * Positive values are driver specific and not used by the helpers.
	 */
	int madv;

	/**
	 * @madv_list: List entry for madvise tracking
	 *
	 * Typically used by drivers to track purgeable objects
	 */
	struct list_head madv_list;

	/**
	 * @pages_mark_dirty_on_put:
	 *
	 * Mark pages as dirty when they are put.
	 */
	unsigned int pages_mark_dirty_on_put    : 1;

	/**
	 * @pages_mark_accessed_on_put:
	 *
	 * Mark pages as accessed when they are put.
	 */
	unsigned int pages_mark_accessed_on_put : 1;

	/**
	 * @sgt: Scatter/gather table for imported PRIME buffers
	 */
	struct sg_table *sgt;

	/**
	 * @vmap_lock: Protects the vmap address and use count
	 */
	struct mutex vmap_lock;

	/**
	 * @vaddr: Kernel virtual address of the backing memory
	 */
	void *vaddr;

	/**
	 * @vmap_use_count:
	 *
	 * Reference count on the virtual address.
	 * The address is unmapped when the count reaches zero.
	 */
	unsigned int vmap_use_count;

	/**
	 * @map_wc: map object write-combined (instead of using shmem defaults).
	 */
	bool map_wc;
};

#define to_drm_gem_shmem_obj(obj) \
	container_of(obj, struct drm_gem_shmem_object, base)

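/*
 * A minimal usage sketch (illustrative only, not part of this header):
 * drivers commonly recover the shmem object from a plain &struct
 * drm_gem_object pointer with to_drm_gem_shmem_obj(), typically inside
 * their &drm_gem_object_funcs callbacks. The callback name below is
 * hypothetical.
 *
 *	static void my_driver_bo_describe(struct drm_gem_object *obj)
 *	{
 *		struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 *
 *		pr_debug("shmem BO, vmap refs: %u\n", shmem->vmap_use_count);
 *	}
 */
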
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
void drm_gem_shmem_free_object(struct drm_gem_object *obj);

int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_pin(struct drm_gem_object *obj);
void drm_gem_shmem_unpin(struct drm_gem_object *obj);
int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);

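/*
 * A hedged example of the vmap/vunmap pairing (illustrative only): a driver
 * needing CPU access fills a &struct dma_buf_map via drm_gem_shmem_vmap()
 * and releases it with drm_gem_shmem_vunmap() when done. The helper name is
 * hypothetical, and the sketch assumes the mapping is in system memory
 * (map.is_iomem == false), the usual case for shmem-backed objects.
 *
 *	static int my_driver_clear_bo(struct drm_gem_object *obj, size_t size)
 *	{
 *		struct dma_buf_map map;
 *		int ret;
 *
 *		ret = drm_gem_shmem_vmap(obj, &map);
 *		if (ret)
 *			return ret;
 *
 *		memset(map.vaddr, 0, size);
 *		drm_gem_shmem_vunmap(obj, &map);
 *		return 0;
 *	}
 */
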
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv);

static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
{
	return (shmem->madv > 0) &&
		!shmem->vmap_use_count && shmem->sgt &&
		!shmem->base.dma_buf && !shmem->base.import_attach;
}

void drm_gem_shmem_purge_locked(struct drm_gem_object *obj);
bool drm_gem_shmem_purge(struct drm_gem_object *obj);

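/*
 * A rough sketch of how the madvise helpers combine in a driver shrinker
 * (illustrative only; tracking objects on @madv_list and the locking around
 * it are the driver's responsibility and are omitted here):
 *
 *	static bool my_driver_try_purge(struct drm_gem_object *obj)
 *	{
 *		struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 *
 *		if (!drm_gem_shmem_is_purgeable(shmem))
 *			return false;
 *
 *		return drm_gem_shmem_purge(obj);
 *	}
 */
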
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args);

int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);

void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj);

struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj);

/*
 * GEM object functions
 */

/**
 * drm_gem_shmem_object_free - GEM object function for drm_gem_shmem_free_object()
 * @obj: GEM object to free
 *
 * This function wraps drm_gem_shmem_free_object(). Drivers that employ the shmem helpers
 * should use it as their &drm_gem_object_funcs.free handler.
 */
static inline void drm_gem_shmem_object_free(struct drm_gem_object *obj)
{
	drm_gem_shmem_free_object(obj);
}

/**
 * drm_gem_shmem_object_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 *
 * This function wraps drm_gem_shmem_print_info(). Drivers that employ the shmem helpers should
 * use this function as their &drm_gem_object_funcs.print_info handler.
 */
static inline void drm_gem_shmem_object_print_info(struct drm_printer *p, unsigned int indent,
						   const struct drm_gem_object *obj)
{
	drm_gem_shmem_print_info(p, indent, obj);
}

/**
 * drm_gem_shmem_object_pin - GEM object function for drm_gem_shmem_pin()
 * @obj: GEM object
 *
 * This function wraps drm_gem_shmem_pin(). Drivers that employ the shmem helpers should
 * use it as their &drm_gem_object_funcs.pin handler.
 */
static inline int drm_gem_shmem_object_pin(struct drm_gem_object *obj)
{
	return drm_gem_shmem_pin(obj);
}

/**
 * drm_gem_shmem_object_unpin - GEM object function for drm_gem_shmem_unpin()
 * @obj: GEM object
 *
 * This function wraps drm_gem_shmem_unpin(). Drivers that employ the shmem helpers should
 * use it as their &drm_gem_object_funcs.unpin handler.
 */
static inline void drm_gem_shmem_object_unpin(struct drm_gem_object *obj)
{
	drm_gem_shmem_unpin(obj);
}

/**
 * drm_gem_shmem_object_get_sg_table - GEM object function for drm_gem_shmem_get_sg_table()
 * @obj: GEM object
 *
 * This function wraps drm_gem_shmem_get_sg_table(). Drivers that employ the shmem helpers should
 * use it as their &drm_gem_object_funcs.get_sg_table handler.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_object *obj)
{
	return drm_gem_shmem_get_sg_table(obj);
}

/**
 * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap()
 * @obj: GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing store.
 *
 * This function wraps drm_gem_shmem_vmap(). Drivers that employ the shmem helpers should
 * use it as their &drm_gem_object_funcs.vmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	return drm_gem_shmem_vmap(obj, map);
}

/**
 * drm_gem_shmem_object_vunmap - GEM object function for drm_gem_shmem_vunmap()
 * @obj: GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should
 * use it as their &drm_gem_object_funcs.vunmap handler.
 */
static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	drm_gem_shmem_vunmap(obj, map);
}

/**
 * drm_gem_shmem_object_mmap - GEM object function for drm_gem_shmem_mmap()
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function wraps drm_gem_shmem_mmap(). Drivers that employ the shmem helpers should
 * use it as their &drm_gem_object_funcs.mmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static inline int drm_gem_shmem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	return drm_gem_shmem_mmap(obj, vma);
}

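/*
 * A hedged sketch (illustrative only) of how a driver typically wires the
 * wrappers above into its &struct drm_gem_object_funcs. Only the handlers
 * provided by this header are shown; a real driver may install additional
 * callbacks, and the variable name below is hypothetical:
 *
 *	static const struct drm_gem_object_funcs my_driver_gem_funcs = {
 *		.free		= drm_gem_shmem_object_free,
 *		.print_info	= drm_gem_shmem_object_print_info,
 *		.pin		= drm_gem_shmem_object_pin,
 *		.unpin		= drm_gem_shmem_object_unpin,
 *		.get_sg_table	= drm_gem_shmem_object_get_sg_table,
 *		.vmap		= drm_gem_shmem_object_vmap,
 *		.vunmap		= drm_gem_shmem_object_vunmap,
 *		.mmap		= drm_gem_shmem_object_mmap,
 *	};
 */
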
/*
 * Driver ops
 */

struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt);

struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj);

/**
 * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
 *
 * This macro provides a shortcut for setting the shmem GEM operations in
 * the &drm_driver structure.
 */
#define DRM_GEM_SHMEM_DRIVER_OPS \
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd, \
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle, \
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
	.gem_prime_mmap		= drm_gem_prime_mmap, \
	.dumb_create		= drm_gem_shmem_dumb_create

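/*
 * A minimal, hedged example of the macro in use (illustrative only; the
 * driver name, feature flags and the remaining &struct drm_driver fields are
 * hypothetical and depend on the driver):
 *
 *	static const struct drm_driver my_driver_drm_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		.name		 = "my_driver",
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 */
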
#endif /* __DRM_GEM_SHMEM_HELPER_H__ */