/*
 * Copyright (c) 2009-2024 Broadcom. All Rights Reserved.
 * The term “Broadcom” refers to Broadcom Inc.
 * and/or its subsidiaries.
 * SPDX-License-Identifier: MIT
 */
7
8
#include "svga_cmd.h"
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "pipe/p_defines.h"
#include "vmw_surface.h"
#include "vmw_screen.h"
#include "vmw_buffer.h"
#include "vmw_context.h"
#include "pipebuffer/pb_bufmgr.h"
18
/**
 * Zero-initialize the backing store of a freshly created surface.
 *
 * Maps the surface's current backing buffer and clears surf_size bytes
 * (plus a trailing SVGA3dDXSOState block for stream-output surfaces).
 * If the current buffer cannot be mapped, a replacement buffer is
 * allocated from the fenced DMA pool, swapped in, and flagged for
 * rebind.  On any failure the function simply returns with the surface
 * contents left as-is.
 *
 * Note: the sws parameter is not used by this function; the winsys
 * screen is taken from the surface itself.
 */
void
vmw_svga_winsys_surface_init(struct svga_winsys_screen *sws,
                             struct svga_winsys_surface *srf,
                             unsigned surf_size, SVGA3dSurfaceAllFlags flags)
{
   struct vmw_svga_winsys_surface *vsrf = vmw_svga_winsys_surface(srf);
   void *data = NULL;
   struct pb_buffer *pb_buf;
   uint32_t pb_flags;
   struct vmw_winsys_screen *vws = vsrf->screen;
   /* We only ever write here, and the whole contents are being cleared,
    * so a discarding write map is appropriate.
    */
   pb_flags = PIPE_MAP_WRITE | PIPE_MAP_DISCARD_WHOLE_RESOURCE;

   struct pb_manager *provider;
   struct pb_desc desc;

   mtx_lock(&vsrf->mutex);
   data = vmw_svga_winsys_buffer_map(&vws->base, vsrf->buf, pb_flags);
   if (data)
      goto out_mapped;

   /* The existing buffer could not be mapped; try to allocate a
    * replacement from the fenced DMA pool and switch the surface to it.
    */
   provider = vws->pools.dma_fenced;
   memset(&desc, 0, sizeof(desc));
   desc.alignment = 4096;
   pb_buf = provider->create_buffer(provider, vsrf->size, &desc);
   if (pb_buf != NULL) {
      struct svga_winsys_buffer *vbuf =
         vmw_svga_winsys_buffer_wrap(pb_buf);

      data = vmw_svga_winsys_buffer_map(&vws->base, vbuf, pb_flags);
      if (data) {
         /* The surface now lives in a new buffer, so the device binding
          * has to be refreshed before the surface is used again.
          */
         vsrf->rebind = true;
         if (vsrf->buf)
            vmw_svga_winsys_buffer_destroy(&vws->base, vsrf->buf);
         vsrf->buf = vbuf;
         goto out_mapped;
      } else {
         /* New buffer could not be mapped either; give up. */
         vmw_svga_winsys_buffer_destroy(&vws->base, vbuf);
         goto out_unlock;
      }
   }
   else {
      /* Cannot create a buffer, just unlock */
      goto out_unlock;
   }

out_mapped:
   /* Drop the mutex while clearing the (possibly large) buffer;
    * re-acquire it only for the unmap below.
    */
   mtx_unlock(&vsrf->mutex);

   if (data) {
      if (flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT) {
         /* Stream-output surfaces carry an extra SO-state block after
          * the surface data proper; clear that as well.
          */
         memset(data, 0, surf_size + sizeof(SVGA3dDXSOState));
      }
      else {
         memset(data, 0, surf_size);
      }
   }

   mtx_lock(&vsrf->mutex);
   vmw_svga_winsys_buffer_unmap(&vsrf->screen->base, vsrf->buf);
out_unlock:
   mtx_unlock(&vsrf->mutex);
}
81
82
/**
 * Map the backing store of a surface for CPU access.
 *
 * Returns the map address on success, or NULL on failure, and bumps the
 * surface's map count.  Output flags:
 *
 *   *retry  - set when the map could not proceed because the surface is
 *             still referenced on the command stream and neither a
 *             discard nor an unsynchronized map is possible; the caller
 *             should flush and try again.
 *   *rebind - set when a PIPE_MAP_PERSISTENT discard map landed in a
 *             new backing buffer and the caller must rebind the surface
 *             immediately.
 *
 * The surface mutex is held for the duration of the call.
 */
void *
vmw_svga_winsys_surface_map(struct svga_winsys_context *swc,
                            struct svga_winsys_surface *srf,
                            unsigned flags, bool *retry,
                            bool *rebind)
{
   struct vmw_svga_winsys_surface *vsrf = vmw_svga_winsys_surface(srf);
   void *data = NULL;
   struct pb_buffer *pb_buf;
   uint32_t pb_flags;
   struct vmw_winsys_screen *vws = vsrf->screen;

   *retry = false;
   *rebind = false;
   assert((flags & (PIPE_MAP_READ | PIPE_MAP_WRITE)) != 0);
   mtx_lock(&vsrf->mutex);

   if (vsrf->mapcount) {
      /* Other mappers will get confused if we discard. */
      flags &= ~PIPE_MAP_DISCARD_WHOLE_RESOURCE;
   }

   vsrf->rebind = false;

   /*
    * If we intend to read, there's no point discarding the
    * data if busy.
    */
   if (flags & PIPE_MAP_READ || vsrf->nodiscard)
      flags &= ~PIPE_MAP_DISCARD_WHOLE_RESOURCE;

   /*
    * Discard is a hint to a synchronized map.
    */
   if (flags & PIPE_MAP_DISCARD_WHOLE_RESOURCE)
      flags &= ~PIPE_MAP_UNSYNCHRONIZED;

   /*
    * The surface is allowed to be referenced on the command stream iff
    * we're mapping unsynchronized or discard. This is an early check.
    * We need to recheck after a failing discard map.
    */
   if (!(flags & (PIPE_MAP_DISCARD_WHOLE_RESOURCE |
                  PIPE_MAP_UNSYNCHRONIZED)) &&
       p_atomic_read(&vsrf->validated)) {
      *retry = true;
      goto out_unlock;
   }

   /* Only these flags are forwarded to the underlying buffer map. */
   pb_flags = flags & (PIPE_MAP_READ_WRITE | PIPE_MAP_UNSYNCHRONIZED |
                       PIPE_MAP_PERSISTENT);

   if (flags & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
      struct pb_manager *provider;
      struct pb_desc desc;

      /*
       * First, if possible, try to map existing storage with DONTBLOCK.
       */
      if (!p_atomic_read(&vsrf->validated)) {
         data = vmw_svga_winsys_buffer_map(&vws->base, vsrf->buf,
                                           PIPE_MAP_DONTBLOCK | pb_flags);
         if (data)
            goto out_mapped;
      }

      /*
       * Attempt to get a new buffer.
       */
      provider = vws->pools.dma_fenced;
      memset(&desc, 0, sizeof(desc));
      desc.alignment = 4096;
      pb_buf = provider->create_buffer(provider, vsrf->size, &desc);
      if (pb_buf != NULL) {
         struct svga_winsys_buffer *vbuf =
            vmw_svga_winsys_buffer_wrap(pb_buf);

         data = vmw_svga_winsys_buffer_map(&vws->base, vbuf, pb_flags);
         if (data) {
            vsrf->rebind = true;
            /*
             * We've discarded data on this surface and thus
             * it's data is no longer consider referenced.
             */
            vmw_swc_surface_clear_reference(swc, vsrf);
            if (vsrf->buf)
               vmw_svga_winsys_buffer_destroy(&vws->base, vsrf->buf);
            vsrf->buf = vbuf;

            /* Rebind persistent maps immediately */
            if (flags & PIPE_MAP_PERSISTENT) {
               *rebind = true;
               vsrf->rebind = false;
            }
            goto out_mapped;
         } else
            vmw_svga_winsys_buffer_destroy(&vws->base, vbuf);
      }
      /*
       * We couldn't get and map a new buffer for some reason.
       * Fall through to an ordinary map.
       * But tell pipe driver to flush now if already on validate list,
       * Otherwise we'll overwrite previous contents.
       */
      if (!(flags & PIPE_MAP_UNSYNCHRONIZED) &&
          p_atomic_read(&vsrf->validated)) {
         *retry = true;
         goto out_unlock;
      }
   }

   /* Ordinary (non-discard) map of the existing backing buffer. */
   pb_flags |= (flags & PIPE_MAP_DONTBLOCK);
   data = vmw_svga_winsys_buffer_map(&vws->base, vsrf->buf, pb_flags);
   if (data == NULL)
      goto out_unlock;

out_mapped:
   ++vsrf->mapcount;
   vsrf->data = data;
   vsrf->map_mode = flags & (PIPE_MAP_READ | PIPE_MAP_WRITE);
out_unlock:
   mtx_unlock(&vsrf->mutex);
   return data;
}
207
208
209 void
vmw_svga_winsys_surface_unmap(struct svga_winsys_context * swc,struct svga_winsys_surface * srf,bool * rebind)210 vmw_svga_winsys_surface_unmap(struct svga_winsys_context *swc,
211 struct svga_winsys_surface *srf,
212 bool *rebind)
213 {
214 struct vmw_svga_winsys_surface *vsrf = vmw_svga_winsys_surface(srf);
215 mtx_lock(&vsrf->mutex);
216 if (--vsrf->mapcount == 0) {
217 *rebind = vsrf->rebind;
218 vsrf->rebind = false;
219 } else {
220 *rebind = false;
221 }
222 vmw_svga_winsys_buffer_unmap(&vsrf->screen->base, vsrf->buf);
223 mtx_unlock(&vsrf->mutex);
224 }
225
/**
 * Destroy a userspace-managed guest-backed surface.
 *
 * Emits a DestroyGBSurface command for sid, flushes the context so the
 * command is submitted, and then releases the userspace surface id.
 * NOTE(review): the flush before clearing the id presumably ensures the
 * destroy reaches the device before the id can be reused — confirm.
 */
void
vmw_svga_winsys_userspace_surface_destroy(struct svga_winsys_context *swc,
                                          uint32 sid)
{
   SVGA3D_DestroyGBSurface(swc, sid);
   swc->flush(swc, NULL);
   vmw_swc_surface_clear_userspace_id(swc, sid);
}
234
235 void
vmw_svga_winsys_surface_reference(struct vmw_svga_winsys_surface ** pdst,struct vmw_svga_winsys_surface * src)236 vmw_svga_winsys_surface_reference(struct vmw_svga_winsys_surface **pdst,
237 struct vmw_svga_winsys_surface *src)
238 {
239 struct pipe_reference *src_ref;
240 struct pipe_reference *dst_ref;
241 struct vmw_svga_winsys_surface *dst;
242
243 if(pdst == NULL || *pdst == src)
244 return;
245
246 dst = *pdst;
247
248 src_ref = src ? &src->refcnt : NULL;
249 dst_ref = dst ? &dst->refcnt : NULL;
250
251 if (pipe_reference(dst_ref, src_ref)) {
252 if (dst->buf)
253 vmw_svga_winsys_buffer_destroy(&dst->screen->base, dst->buf);
254 if (vmw_has_userspace_surface(dst->screen))
255 vmw_svga_winsys_userspace_surface_destroy(dst->screen->swc, dst->sid);
256 else
257 vmw_ioctl_surface_destroy(dst->screen, dst->sid);
258 #if MESA_DEBUG
259 /* to detect dangling pointers */
260 assert(p_atomic_read(&dst->validated) == 0);
261 dst->sid = SVGA3D_INVALID_ID;
262 #endif
263 mtx_destroy(&dst->mutex);
264 FREE(dst);
265 }
266
267 *pdst = src;
268 }
269