/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 */

#ifndef VN_RENDERER_H
#define VN_RENDERER_H

#include "vn_common.h"

struct vn_renderer_shmem {
   struct vn_refcount refcount;

   uint32_t res_id;
   size_t mmap_size; /* for internal use only (i.e., munmap) */
   void *mmap_ptr;

   struct list_head cache_head;
   int64_t cache_timestamp;
};

struct vn_renderer_bo {
   struct vn_refcount refcount;

   uint32_t res_id;
   /* for internal use only */
   size_t mmap_size;
   void *mmap_ptr;
};

/*
 * A sync consists of a uint64_t counter. The counter can be updated by the
 * CPU or by the GPU. It can also be waited on by the CPU or the GPU until it
 * reaches certain values.
 *
 * This is modeled after a timeline VkSemaphore rather than a timeline
 * drm_syncobj. The main difference is that a drm_syncobj can have the
 * unsignaled value 0.
 */
struct vn_renderer_sync {
   uint32_t sync_id;
};
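
/*
 * Illustrative sketch (not from the driver): with the timeline model above,
 * a sync created at value 0 is signaled by writing monotonically increasing
 * values and observed by reading the counter back.  The helpers used here
 * are the static inline wrappers declared later in this header; error
 * handling is omitted.
 *
 *    struct vn_renderer_sync *sync;
 *    uint64_t val;
 *    vn_renderer_sync_create(renderer, 0, VN_RENDERER_SYNC_SHAREABLE, &sync);
 *    vn_renderer_sync_write(renderer, sync, 1); // CPU-side signal to 1
 *    vn_renderer_sync_read(renderer, sync, &val); // val >= 1 once signaled
 *    vn_renderer_sync_destroy(renderer, sync);
 */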

struct vn_renderer_info {
   struct {
      bool has_primary;
      int primary_major;
      int primary_minor;
      bool has_render;
      int render_major;
      int render_minor;
   } drm;

   struct {
      uint16_t vendor_id;
      uint16_t device_id;

      bool has_bus_info;
      uint16_t domain;
      uint8_t bus;
      uint8_t device;
      uint8_t function;
   } pci;

   bool has_dma_buf_import;
   bool has_external_sync;
   bool has_implicit_fencing;
   bool has_guest_vram;

   uint32_t max_timeline_count;

   /* hw capset */
   uint32_t wire_format_version;
   uint32_t vk_xml_version;
   uint32_t vk_ext_command_serialization_spec_version;
   uint32_t vk_mesa_venus_protocol_spec_version;
   uint32_t supports_blob_id_0;
   /* combined mask for vk_extension_mask1, 2,..., N */
   uint32_t vk_extension_mask[32];
   uint32_t allow_vk_wait_syncs;
   uint32_t supports_multiple_timelines;
};
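
/*
 * Illustrative sketch (not from the driver): this struct only reports what
 * the renderer supports; callers are expected to gate optional paths on the
 * capability bits, e.g.
 *
 *    if (renderer->info.has_dma_buf_import)
 *       result = vn_renderer_bo_create_from_dma_buf(renderer, size, fd,
 *                                                   flags, &bo);
 */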

struct vn_renderer_submit_batch {
   const void *cs_data;
   size_t cs_size;

   /*
    * Submit cs to the timeline identified by ring_idx. A timeline is
    * typically associated with a physical VkQueue and bound to the ring_idx
    * during VkQueue creation. After execution completes on the VkQueue, the
    * timeline sync point is signaled.
    *
    * ring_idx 0 is reserved for the context-specific CPU timeline. Sync
    * points on the CPU timeline are signaled immediately after command
    * processing by the renderer.
    */
   uint32_t ring_idx;

   /* syncs to update when the timeline is signaled */
   struct vn_renderer_sync *const *syncs;
   /* TODO allow NULL when syncs are all binary? */
   const uint64_t *sync_values;
   uint32_t sync_count;
};

struct vn_renderer_submit {
   /* BOs to pin and to fence implicitly
    *
    * TODO track all bos and automatically pin them. We don't do it yet
    * because each vn_command_buffer owns a bo. We can probably make do by
    * returning the bos to a bo cache and excluding the bo cache from
    * pinning.
    */
   struct vn_renderer_bo *const *bos;
   uint32_t bo_count;

   const struct vn_renderer_submit_batch *batches;
   uint32_t batch_count;
};
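
/*
 * Illustrative sketch (not from the driver): submitting one command stream
 * to ring_idx 1 and signaling one timeline sync at `sync_val` when the
 * VkQueue bound to that ring finishes executing it.  Variable names such as
 * `cs_data`, `bos`, and `sync` are hypothetical and error handling is
 * omitted.
 *
 *    const struct vn_renderer_submit_batch batch = {
 *       .cs_data = cs_data,
 *       .cs_size = cs_size,
 *       .ring_idx = 1,
 *       .syncs = &sync,
 *       .sync_values = &sync_val,
 *       .sync_count = 1,
 *    };
 *    const struct vn_renderer_submit submit = {
 *       .bos = bos,
 *       .bo_count = bo_count,
 *       .batches = &batch,
 *       .batch_count = 1,
 *    };
 *    VkResult result = vn_renderer_submit(renderer, &submit);
 */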

struct vn_renderer_wait {
   bool wait_any;
   uint64_t timeout;

   struct vn_renderer_sync *const *syncs;
   /* TODO allow NULL when syncs are all binary? */
   const uint64_t *sync_values;
   uint32_t sync_count;
};
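
/*
 * Illustrative sketch (not from the driver): blocking until a single sync
 * reaches `sync_val` or the timeout expires.  wait() may legitimately return
 * VK_TIMEOUT, which the caller has to handle.
 *
 *    const struct vn_renderer_wait wait = {
 *       .wait_any = false,
 *       .timeout = UINT64_MAX,
 *       .syncs = &sync,
 *       .sync_values = &sync_val,
 *       .sync_count = 1,
 *    };
 *    VkResult result = vn_renderer_wait(renderer, &wait);
 */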

struct vn_renderer_ops {
   void (*destroy)(struct vn_renderer *renderer,
                   const VkAllocationCallbacks *alloc);

   VkResult (*submit)(struct vn_renderer *renderer,
                      const struct vn_renderer_submit *submit);

   /*
    * On success, returns VK_SUCCESS or VK_TIMEOUT. On failure, returns
    * VK_ERROR_DEVICE_LOST or out of device/host memory.
    */
   VkResult (*wait)(struct vn_renderer *renderer,
                    const struct vn_renderer_wait *wait);
};

struct vn_renderer_shmem_ops {
   struct vn_renderer_shmem *(*create)(struct vn_renderer *renderer,
                                       size_t size);
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_shmem *shmem);
};

struct vn_renderer_bo_ops {
   VkResult (*create_from_device_memory)(
      struct vn_renderer *renderer,
      VkDeviceSize size,
      vn_object_id mem_id,
      VkMemoryPropertyFlags flags,
      VkExternalMemoryHandleTypeFlags external_handles,
      struct vn_renderer_bo **out_bo);

   VkResult (*create_from_dma_buf)(struct vn_renderer *renderer,
                                   VkDeviceSize size,
                                   int fd,
                                   VkMemoryPropertyFlags flags,
                                   struct vn_renderer_bo **out_bo);

   bool (*destroy)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);

   int (*export_dma_buf)(struct vn_renderer *renderer,
                         struct vn_renderer_bo *bo);

   /* map is not thread-safe */
   void *(*map)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);

   void (*flush)(struct vn_renderer *renderer,
                 struct vn_renderer_bo *bo,
                 VkDeviceSize offset,
                 VkDeviceSize size);
   void (*invalidate)(struct vn_renderer *renderer,
                      struct vn_renderer_bo *bo,
                      VkDeviceSize offset,
                      VkDeviceSize size);
};
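
/*
 * Illustrative sketch (not from the driver): a CPU write through a mapped,
 * non-coherent bo is typically bracketed by map and flush.  Note that map
 * is not thread-safe, so callers serialize it externally.  `data` and
 * `data_size` are hypothetical.
 *
 *    void *ptr = vn_renderer_bo_map(renderer, bo);
 *    if (ptr) {
 *       memcpy(ptr, data, data_size);
 *       vn_renderer_bo_flush(renderer, bo, 0, data_size);
 *    }
 */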

enum vn_renderer_sync_flags {
   VN_RENDERER_SYNC_SHAREABLE = 1u << 0,
   VN_RENDERER_SYNC_BINARY = 1u << 1,
};

struct vn_renderer_sync_ops {
   VkResult (*create)(struct vn_renderer *renderer,
                      uint64_t initial_val,
                      uint32_t flags,
                      struct vn_renderer_sync **out_sync);

   VkResult (*create_from_syncobj)(struct vn_renderer *renderer,
                                   int fd,
                                   bool sync_file,
                                   struct vn_renderer_sync **out_sync);
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_sync *sync);

   int (*export_syncobj)(struct vn_renderer *renderer,
                         struct vn_renderer_sync *sync,
                         bool sync_file);

   /* reset the counter */
   VkResult (*reset)(struct vn_renderer *renderer,
                     struct vn_renderer_sync *sync,
                     uint64_t initial_val);

   /* read the current value from the counter */
   VkResult (*read)(struct vn_renderer *renderer,
                    struct vn_renderer_sync *sync,
                    uint64_t *val);

   /* write a new value (larger than the current one) to the counter */
   VkResult (*write)(struct vn_renderer *renderer,
                     struct vn_renderer_sync *sync,
                     uint64_t val);
};
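
/*
 * Illustrative sketch (not from the driver): a binary, shareable sync can be
 * created at value 0, exported as a drm_syncobj fd, and later reset for
 * reuse.  Error and fd-ownership handling are omitted.
 *
 *    struct vn_renderer_sync *sync;
 *    vn_renderer_sync_create(renderer, 0,
 *                            VN_RENDERER_SYNC_SHAREABLE |
 *                               VN_RENDERER_SYNC_BINARY,
 *                            &sync);
 *    int fd = vn_renderer_sync_export_syncobj(renderer, sync, false);
 *    vn_renderer_sync_reset(renderer, sync, 0);
 *    vn_renderer_sync_destroy(renderer, sync);
 */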

struct vn_renderer {
   struct vn_renderer_info info;
   struct vn_renderer_ops ops;
   struct vn_renderer_shmem_ops shmem_ops;
   struct vn_renderer_bo_ops bo_ops;
   struct vn_renderer_sync_ops sync_ops;
};

VkResult
vn_renderer_create_virtgpu(struct vn_instance *instance,
                           const VkAllocationCallbacks *alloc,
                           struct vn_renderer **renderer);

VkResult
vn_renderer_create_vtest(struct vn_instance *instance,
                         const VkAllocationCallbacks *alloc,
                         struct vn_renderer **renderer);

static inline VkResult
vn_renderer_create(struct vn_instance *instance,
                   const VkAllocationCallbacks *alloc,
                   struct vn_renderer **renderer)
{
   if (VN_DEBUG(VTEST)) {
      VkResult result = vn_renderer_create_vtest(instance, alloc, renderer);
      if (result == VK_SUCCESS)
         return VK_SUCCESS;
   }

   return vn_renderer_create_virtgpu(instance, alloc, renderer);
}
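
/*
 * Illustrative sketch (not from the driver): a caller creates the renderer
 * once per instance; the vtest backend is tried first only when the
 * VN_DEBUG(VTEST) debug flag is set, otherwise virtgpu is used.  Teardown
 * goes through the matching destroy call.
 *
 *    struct vn_renderer *renderer;
 *    VkResult result = vn_renderer_create(instance, alloc, &renderer);
 *    if (result != VK_SUCCESS)
 *       return result;
 *    vn_renderer_destroy(renderer, alloc);
 */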

static inline void
vn_renderer_destroy(struct vn_renderer *renderer,
                    const VkAllocationCallbacks *alloc)
{
   renderer->ops.destroy(renderer, alloc);
}

static inline VkResult
vn_renderer_submit(struct vn_renderer *renderer,
                   const struct vn_renderer_submit *submit)
{
   return renderer->ops.submit(renderer, submit);
}

static inline VkResult
vn_renderer_wait(struct vn_renderer *renderer,
                 const struct vn_renderer_wait *wait)
{
   return renderer->ops.wait(renderer, wait);
}

static inline struct vn_renderer_shmem *
vn_renderer_shmem_create(struct vn_renderer *renderer, size_t size)
{
   VN_TRACE_FUNC();
   struct vn_renderer_shmem *shmem =
      renderer->shmem_ops.create(renderer, size);
   if (shmem) {
      assert(vn_refcount_is_valid(&shmem->refcount));
      assert(shmem->res_id);
      assert(shmem->mmap_size >= size);
      assert(shmem->mmap_ptr);
   }

   return shmem;
}

static inline struct vn_renderer_shmem *
vn_renderer_shmem_ref(struct vn_renderer *renderer,
                      struct vn_renderer_shmem *shmem)
{
   vn_refcount_inc(&shmem->refcount);
   return shmem;
}

static inline void
vn_renderer_shmem_unref(struct vn_renderer *renderer,
                        struct vn_renderer_shmem *shmem)
{
   if (vn_refcount_dec(&shmem->refcount))
      renderer->shmem_ops.destroy(renderer, shmem);
}
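
/*
 * Illustrative sketch (not from the driver): shmems are refcounted; a
 * typical pattern is create, write through the mapping, then unref.  The
 * final unref destroys the shmem.  `data` and `data_size` are hypothetical.
 *
 *    struct vn_renderer_shmem *shmem =
 *       vn_renderer_shmem_create(renderer, 4096);
 *    if (shmem) {
 *       memcpy(shmem->mmap_ptr, data, data_size);
 *       vn_renderer_shmem_unref(renderer, shmem);
 *    }
 */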

static inline VkResult
vn_renderer_bo_create_from_device_memory(
   struct vn_renderer *renderer,
   VkDeviceSize size,
   vn_object_id mem_id,
   VkMemoryPropertyFlags flags,
   VkExternalMemoryHandleTypeFlags external_handles,
   struct vn_renderer_bo **out_bo)
{
   struct vn_renderer_bo *bo;
   VkResult result = renderer->bo_ops.create_from_device_memory(
      renderer, size, mem_id, flags, external_handles, &bo);
   if (result != VK_SUCCESS)
      return result;

   assert(vn_refcount_is_valid(&bo->refcount));
   assert(bo->res_id);
   assert(!bo->mmap_size || bo->mmap_size >= size);

   *out_bo = bo;
   return VK_SUCCESS;
}

static inline VkResult
vn_renderer_bo_create_from_dma_buf(struct vn_renderer *renderer,
                                   VkDeviceSize size,
                                   int fd,
                                   VkMemoryPropertyFlags flags,
                                   struct vn_renderer_bo **out_bo)
{
   struct vn_renderer_bo *bo;
   VkResult result =
      renderer->bo_ops.create_from_dma_buf(renderer, size, fd, flags, &bo);
   if (result != VK_SUCCESS)
      return result;

   assert(vn_refcount_is_valid(&bo->refcount));
   assert(bo->res_id);
   assert(!bo->mmap_size || bo->mmap_size >= size);

   *out_bo = bo;
   return VK_SUCCESS;
}

static inline struct vn_renderer_bo *
vn_renderer_bo_ref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   vn_refcount_inc(&bo->refcount);
   return bo;
}

static inline bool
vn_renderer_bo_unref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   if (vn_refcount_dec(&bo->refcount))
      return renderer->bo_ops.destroy(renderer, bo);
   return false;
}
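
/*
 * Illustrative sketch (not from the driver): importing a dma-buf as a bo and
 * dropping the reference when done.  `size`, `fd`, and `flags` are
 * hypothetical caller-provided values; error handling is simplified.
 *
 *    struct vn_renderer_bo *bo;
 *    VkResult result = vn_renderer_bo_create_from_dma_buf(
 *       renderer, size, fd, flags, &bo);
 *    if (result == VK_SUCCESS)
 *       vn_renderer_bo_unref(renderer, bo);
 */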

static inline int
vn_renderer_bo_export_dma_buf(struct vn_renderer *renderer,
                              struct vn_renderer_bo *bo)
{
   return renderer->bo_ops.export_dma_buf(renderer, bo);
}

static inline void *
vn_renderer_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   return renderer->bo_ops.map(renderer, bo);
}

static inline void
vn_renderer_bo_flush(struct vn_renderer *renderer,
                     struct vn_renderer_bo *bo,
                     VkDeviceSize offset,
                     VkDeviceSize end)
{
   renderer->bo_ops.flush(renderer, bo, offset, end);
}

static inline void
vn_renderer_bo_invalidate(struct vn_renderer *renderer,
                          struct vn_renderer_bo *bo,
                          VkDeviceSize offset,
                          VkDeviceSize size)
{
   renderer->bo_ops.invalidate(renderer, bo, offset, size);
}
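
/*
 * Illustrative sketch (not from the driver): reading back data the GPU has
 * written to a non-coherent bo, invalidating the range before the CPU read.
 * `out` and `result_size` are hypothetical.
 *
 *    void *ptr = vn_renderer_bo_map(renderer, bo);
 *    if (ptr) {
 *       vn_renderer_bo_invalidate(renderer, bo, 0, result_size);
 *       memcpy(out, ptr, result_size);
 *    }
 */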

static inline VkResult
vn_renderer_sync_create(struct vn_renderer *renderer,
                        uint64_t initial_val,
                        uint32_t flags,
                        struct vn_renderer_sync **out_sync)
{
   return renderer->sync_ops.create(renderer, initial_val, flags, out_sync);
}

static inline VkResult
vn_renderer_sync_create_from_syncobj(struct vn_renderer *renderer,
                                     int fd,
                                     bool sync_file,
                                     struct vn_renderer_sync **out_sync)
{
   return renderer->sync_ops.create_from_syncobj(renderer, fd, sync_file,
                                                 out_sync);
}

static inline void
vn_renderer_sync_destroy(struct vn_renderer *renderer,
                         struct vn_renderer_sync *sync)
{
   renderer->sync_ops.destroy(renderer, sync);
}

static inline int
vn_renderer_sync_export_syncobj(struct vn_renderer *renderer,
                                struct vn_renderer_sync *sync,
                                bool sync_file)
{
   return renderer->sync_ops.export_syncobj(renderer, sync, sync_file);
}

static inline VkResult
vn_renderer_sync_reset(struct vn_renderer *renderer,
                       struct vn_renderer_sync *sync,
                       uint64_t initial_val)
{
   return renderer->sync_ops.reset(renderer, sync, initial_val);
}

static inline VkResult
vn_renderer_sync_read(struct vn_renderer *renderer,
                      struct vn_renderer_sync *sync,
                      uint64_t *val)
{
   return renderer->sync_ops.read(renderer, sync, val);
}

static inline VkResult
vn_renderer_sync_write(struct vn_renderer *renderer,
                       struct vn_renderer_sync *sync,
                       uint64_t val)
{
   return renderer->sync_ops.write(renderer, sync, val);
}

#endif /* VN_RENDERER_H */