/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2023-2024 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef VMWGFX_BO_H
#define VMWGFX_BO_H

#include "device_include/svga_reg.h"

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

#include <linux/rbtree_types.h>
#include <linux/types.h>
#include <linux/xarray.h>

struct vmw_bo_dirty;
struct vmw_fence_obj;
struct vmw_private;
struct vmw_resource;
struct vmw_surface;

enum vmw_bo_domain {
	VMW_BO_DOMAIN_SYS           = BIT(0),
	VMW_BO_DOMAIN_WAITABLE_SYS  = BIT(1),
	VMW_BO_DOMAIN_VRAM          = BIT(2),
	VMW_BO_DOMAIN_GMR           = BIT(3),
	VMW_BO_DOMAIN_MOB           = BIT(4),
};

struct vmw_bo_params {
	u32 domain;
	u32 busy_domain;
	enum ttm_bo_type bo_type;
	bool pin;
	bool keep_resv;
	size_t size;
	struct dma_resv *resv;
	struct sg_table *sg;
};

/**
 * struct vmw_bo - TTM buffer object with vmwgfx additions
 * @tbo: The TTM buffer object
 * @placement: The preferred placement for this buffer object
 * @places: The chosen places for the preferred placement.
 * @busy_places: Chosen busy places for the preferred placement
 * @map: Kmap object for semi-persistent mappings
 * @res_tree: RB tree of resources using this buffer object as a backing MOB
 * @res_prios: Eviction priority counts for attached resources
 * @detached_resources: Resources associated with this buffer object that are
 * tracked outside of @res_tree
 * @map_count: The number of currently active maps. Will differ from the
 * cpu_writers because it includes kernel maps.
 * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
 * increased. May be decreased without reservation.
 * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
 * @dirty: structure for user-space dirty-tracking
 * @is_dumb: Whether this buffer object was created as a dumb buffer
 * @dumb_surface: The surface backing this buffer object when it is a dumb
 * buffer, if any
 */
struct vmw_bo {
	struct ttm_buffer_object tbo;

	struct ttm_placement placement;
	struct ttm_place places[5];
	struct ttm_place busy_places[5];

	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;

	struct rb_root res_tree;
	u32 res_prios[TTM_MAX_BO_PRIORITY];
	struct xarray detached_resources;

	atomic_t map_count;
	atomic_t cpu_writers;
	/* Not ref-counted.  Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
	struct vmw_bo_dirty *dirty;

	bool is_dumb;
	struct vmw_surface *dumb_surface;
};

void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo);

int vmw_bo_create(struct vmw_private *dev_priv,
		  struct vmw_bo_params *params,
		  struct vmw_bo **p_bo);

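/*
 * Illustrative sketch only (not part of the driver API): one plausible way to
 * create and later release a kernel-type buffer object with vmw_bo_create().
 * The domain choice, size and pin flag below are assumptions made for the
 * sake of the example.
 *
 *	struct vmw_bo_params params = {
 *		.domain      = VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_VRAM,
 *		.busy_domain = VMW_BO_DOMAIN_SYS,
 *		.bo_type     = ttm_bo_type_kernel,
 *		.size        = PAGE_SIZE,
 *		.pin         = true,
 *	};
 *	struct vmw_bo *vbo;
 *	int ret = vmw_bo_create(dev_priv, &params, &vbo);
 *
 *	if (ret)
 *		return ret;
 *	...
 *	vmw_bo_unreference(&vbo);
 */
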
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);

int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_bo *buf,
		       bool interruptible);
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_bo *buf,
			      bool interruptible);
int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_bo *bo,
				bool interruptible);
void vmw_bo_pin_reserved(struct vmw_bo *bo, bool pin);
int vmw_bo_unpin(struct vmw_private *vmw_priv,
		 struct vmw_bo *bo,
		 bool interruptible);

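/*
 * Illustrative sketch only: vmw_bo_pin_reserved() expects the buffer object
 * to already be reserved by the caller; the surrounding code below is an
 * assumption of the example, with error handling elided.
 *
 *	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	vmw_bo_pin_reserved(vbo, true);
 *	...
 *	vmw_bo_pin_reserved(vbo, false);
 *	ttm_bo_unreserve(&vbo->tbo);
 */
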
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
			  SVGAGuestPtr *ptr);
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence);

void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size);
void vmw_bo_unmap(struct vmw_bo *vbo);

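/*
 * Illustrative sketch only: vmw_bo_map_and_cache() returns a kernel virtual
 * address and caches the mapping in @map; vmw_bo_unmap() drops it again.
 * Locking requirements are not shown, and the data/size variables are
 * assumptions of the example.
 *
 *	void *virt = vmw_bo_map_and_cache(vbo);
 *
 *	if (virt)
 *		memcpy(virt, data, size);
 *	vmw_bo_unmap(vbo);
 */
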
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);

int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo);

int vmw_user_bo_lookup(struct drm_file *filp,
		       u32 handle,
		       struct vmw_bo **out);

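/*
 * Illustrative sketch only: a buffer object looked up by handle with
 * vmw_user_bo_lookup() is returned with a reference the caller is expected
 * to drop with vmw_user_bo_unref() when done. The handle variable is an
 * assumption of the example.
 *
 *	struct vmw_bo *vbo;
 *	int ret = vmw_user_bo_lookup(file_priv, handle, &vbo);
 *
 *	if (ret)
 *		return ret;
 *	...
 *	vmw_user_bo_unref(&vbo);
 */
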
/**
 * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
 * according to attached resources
 * @vbo: The struct vmw_bo
 */
static inline void vmw_bo_prio_adjust(struct vmw_bo *vbo)
{
	int i = ARRAY_SIZE(vbo->res_prios);

	while (i--) {
		if (vbo->res_prios[i]) {
			vbo->tbo.priority = i;
			return;
		}
	}

	vbo->tbo.priority = 3;
}

/**
 * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
 * eviction priority
 * @vbo: The struct vmw_bo
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction priority
 * to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_add(struct vmw_bo *vbo, int prio)
{
	if (vbo->res_prios[prio]++ == 0)
		vmw_bo_prio_adjust(vbo);
}

/**
 * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
 * priority being removed
 * @vbo: The struct vmw_bo
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction priority
 * to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_del(struct vmw_bo *vbo, int prio)
{
	if (--vbo->res_prios[prio] == 0)
		vmw_bo_prio_adjust(vbo);
}

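/*
 * Illustrative sketch only: a resource attaching to and later detaching from
 * a backing buffer object would typically bracket its lifetime with the two
 * helpers above, so the buffer object always carries the highest eviction
 * priority of its attached resources. The res_prio variable is an assumption.
 *
 *	vmw_bo_prio_add(vbo, res_prio);
 *	...
 *	vmw_bo_prio_del(vbo, res_prio);
 */
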
static inline void vmw_bo_unreference(struct vmw_bo **buf)
{
	struct vmw_bo *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf)
		ttm_bo_put(&tmp_buf->tbo);
}

static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
{
	ttm_bo_get(&buf->tbo);
	return buf;
}

static inline struct vmw_bo *vmw_user_bo_ref(struct vmw_bo *vbo)
{
	drm_gem_object_get(&vbo->tbo.base);
	return vbo;
}

static inline void vmw_user_bo_unref(struct vmw_bo **buf)
{
	struct vmw_bo *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf)
		drm_gem_object_put(&tmp_buf->tbo.base);
}

static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
{
	return container_of((gobj), struct vmw_bo, tbo.base);
}

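/*
 * Illustrative sketch only: to_vmw_bo() recovers the driver object from the
 * embedded GEM object, e.g. inside a hypothetical GEM callback:
 *
 *	static void example_gem_callback(struct drm_gem_object *gobj)
 *	{
 *		struct vmw_bo *vbo = to_vmw_bo(gobj);
 *
 *		vmw_bo_unmap(vbo);
 *	}
 */
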
#endif // VMWGFX_BO_H