/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 */

#ifndef VN_RENDERER_H
#define VN_RENDERER_H

#include "vn_common.h"

struct vn_renderer_shmem {
   struct vn_refcount refcount;

   uint32_t res_id;
   size_t mmap_size; /* for internal use only (i.e., munmap) */
   void *mmap_ptr;

   struct list_head cache_head;
   int64_t cache_timestamp;
};

struct vn_renderer_bo {
   struct vn_refcount refcount;

   uint32_t res_id;
   /* for internal use only */
   size_t mmap_size;
   void *mmap_ptr;
};

/*
 * A sync consists of a uint64_t counter. The counter can be updated by the
 * CPU or by the GPU. It can also be waited on by the CPU or by the GPU
 * until it reaches certain values.
 *
 * This is modeled after a timeline VkSemaphore rather than a timeline
 * drm_syncobj. The main difference is that a drm_syncobj can have the
 * unsignaled value 0.
 */
struct vn_renderer_sync {
   uint32_t sync_id;
};
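
/*
 * A rough sketch of the counter semantics, assuming a sync created with an
 * initial value of 0 (illustrative only; names such as renderer and sync
 * are placeholders):
 *
 *    struct vn_renderer_sync *sync;
 *    vn_renderer_sync_create(renderer, 0, 0, &sync);
 *
 *    // a submit batch listing (sync, 10) in its syncs/sync_values arrays
 *    // asks the renderer to bump the counter to 10 once the batch completes
 *
 *    // a vn_renderer_wait on (sync, 10) then returns once the counter has
 *    // reached 10, much like vkWaitSemaphores on a timeline VkSemaphore
 */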

struct vn_renderer_info {
   struct {
      bool has_primary;
      int primary_major;
      int primary_minor;
      bool has_render;
      int render_major;
      int render_minor;
   } drm;

   struct {
      uint16_t vendor_id;
      uint16_t device_id;

      bool has_bus_info;
      uint16_t domain;
      uint8_t bus;
      uint8_t device;
      uint8_t function;
   } pci;

   bool has_dma_buf_import;
   bool has_cache_management;
   bool has_external_sync;
   bool has_implicit_fencing;
   bool has_guest_vram;

   uint32_t max_sync_queue_count;

   /* hw capset */
   uint32_t wire_format_version;
   uint32_t vk_xml_version;
   uint32_t vk_ext_command_serialization_spec_version;
   uint32_t vk_mesa_venus_protocol_spec_version;
   uint32_t supports_blob_id_0;
   /* combined mask for vk_extension_mask1, 2, ..., N */
   uint32_t vk_extension_mask[32];
   uint32_t allow_vk_wait_syncs;
};

struct vn_renderer_submit_batch {
   const void *cs_data;
   size_t cs_size;

   /*
    * Submit cs to the virtual sync queue identified by sync_queue_index.
    * The virtual queue is assumed to be associated with the physical
    * VkQueue identified by vk_queue_id. After the execution completes on
    * the VkQueue, the virtual sync queue is signaled.
    *
    * sync_queue_index must be less than max_sync_queue_count.
    *
    * vk_queue_id specifies the object id of a VkQueue.
    *
    * When sync_queue_cpu is true, it specifies the special CPU sync queue,
    * and sync_queue_index/vk_queue_id are ignored. TODO revisit this later
    */
   uint32_t sync_queue_index;
   bool sync_queue_cpu;
   vn_object_id vk_queue_id;

   /* syncs to update when the virtual sync queue is signaled */
   struct vn_renderer_sync *const *syncs;
   /* TODO allow NULL when syncs are all binary? */
   const uint64_t *sync_values;
   uint32_t sync_count;
};

struct vn_renderer_submit {
   /* BOs to pin and to fence implicitly
    *
    * TODO track all bos and automatically pin them. We don't do it yet
    * because each vn_command_buffer owns a bo. We can probably make do by
    * returning the bos to a bo cache and excluding the bo cache from
    * pinning.
    */
   struct vn_renderer_bo *const *bos;
   uint32_t bo_count;

   const struct vn_renderer_submit_batch *batches;
   uint32_t batch_count;
};
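
/*
 * A minimal sketch of filling out a submission, assuming cmd_data/cmd_size
 * hold an encoded command stream and that queue_id, bos, bo_count, sync and
 * signal_value are placeholders set up elsewhere (illustrative only; error
 * handling omitted):
 *
 *    const struct vn_renderer_submit_batch batch = {
 *       .cs_data = cmd_data,
 *       .cs_size = cmd_size,
 *       .sync_queue_index = 0,
 *       .vk_queue_id = queue_id,
 *       .syncs = &sync,
 *       .sync_values = &(uint64_t){ signal_value },
 *       .sync_count = 1,
 *    };
 *    const struct vn_renderer_submit submit = {
 *       .bos = bos,
 *       .bo_count = bo_count,
 *       .batches = &batch,
 *       .batch_count = 1,
 *    };
 *    VkResult result = vn_renderer_submit(renderer, &submit);
 */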

struct vn_renderer_wait {
   bool wait_any;
   uint64_t timeout;

   struct vn_renderer_sync *const *syncs;
   /* TODO allow NULL when syncs are all binary? */
   const uint64_t *sync_values;
   uint32_t sync_count;
};
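
/*
 * A minimal sketch of the matching CPU-side wait, reusing the sync and
 * signal_value placeholders from the submission sketch above (illustrative
 * only):
 *
 *    const struct vn_renderer_wait wait = {
 *       .wait_any = false,
 *       .timeout = UINT64_MAX,
 *       .syncs = &sync,
 *       .sync_values = &(uint64_t){ signal_value },
 *       .sync_count = 1,
 *    };
 *    VkResult result = vn_renderer_wait(renderer, &wait);
 *    // VK_SUCCESS once every counter reaches its value, or VK_TIMEOUT if
 *    // the timeout expires first
 */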

struct vn_renderer_ops {
   void (*destroy)(struct vn_renderer *renderer,
                   const VkAllocationCallbacks *alloc);

   VkResult (*submit)(struct vn_renderer *renderer,
                      const struct vn_renderer_submit *submit);

   /*
    * On success, returns VK_SUCCESS or VK_TIMEOUT. On failure, returns
    * VK_ERROR_DEVICE_LOST or an out-of-device/host-memory error.
    */
   VkResult (*wait)(struct vn_renderer *renderer,
                    const struct vn_renderer_wait *wait);
};

struct vn_renderer_shmem_ops {
   struct vn_renderer_shmem *(*create)(struct vn_renderer *renderer,
                                        size_t size);
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_shmem *shmem);
};

struct vn_renderer_bo_ops {
   VkResult (*create_from_device_memory)(
      struct vn_renderer *renderer,
      VkDeviceSize size,
      vn_object_id mem_id,
      VkMemoryPropertyFlags flags,
      VkExternalMemoryHandleTypeFlags external_handles,
      struct vn_renderer_bo **out_bo);

   VkResult (*create_from_dma_buf)(struct vn_renderer *renderer,
                                   VkDeviceSize size,
                                   int fd,
                                   VkMemoryPropertyFlags flags,
                                   struct vn_renderer_bo **out_bo);

   bool (*destroy)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);

   int (*export_dma_buf)(struct vn_renderer *renderer,
                         struct vn_renderer_bo *bo);

   /* map is not thread-safe */
   void *(*map)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);

   void (*flush)(struct vn_renderer *renderer,
                 struct vn_renderer_bo *bo,
                 VkDeviceSize offset,
                 VkDeviceSize size);
   void (*invalidate)(struct vn_renderer *renderer,
                      struct vn_renderer_bo *bo,
                      VkDeviceSize offset,
                      VkDeviceSize size);
};

enum vn_renderer_sync_flags {
   VN_RENDERER_SYNC_SHAREABLE = 1u << 0,
   VN_RENDERER_SYNC_BINARY = 1u << 1,
};

struct vn_renderer_sync_ops {
   VkResult (*create)(struct vn_renderer *renderer,
                      uint64_t initial_val,
                      uint32_t flags,
                      struct vn_renderer_sync **out_sync);

   VkResult (*create_from_syncobj)(struct vn_renderer *renderer,
                                   int fd,
                                   bool sync_file,
                                   struct vn_renderer_sync **out_sync);
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_sync *sync);

   int (*export_syncobj)(struct vn_renderer *renderer,
                         struct vn_renderer_sync *sync,
                         bool sync_file);

   /* reset the counter */
   VkResult (*reset)(struct vn_renderer *renderer,
                     struct vn_renderer_sync *sync,
                     uint64_t initial_val);

   /* read the current value from the counter */
   VkResult (*read)(struct vn_renderer *renderer,
                    struct vn_renderer_sync *sync,
                    uint64_t *val);

   /* write a new value (larger than the current one) to the counter */
   VkResult (*write)(struct vn_renderer *renderer,
                     struct vn_renderer_sync *sync,
                     uint64_t val);
};

struct vn_renderer {
   struct vn_renderer_info info;
   struct vn_renderer_ops ops;
   struct vn_renderer_shmem_ops shmem_ops;
   struct vn_renderer_bo_ops bo_ops;
   struct vn_renderer_sync_ops sync_ops;
};

VkResult
vn_renderer_create_virtgpu(struct vn_instance *instance,
                           const VkAllocationCallbacks *alloc,
                           struct vn_renderer **renderer);

VkResult
vn_renderer_create_vtest(struct vn_instance *instance,
                         const VkAllocationCallbacks *alloc,
                         struct vn_renderer **renderer);

static inline VkResult
vn_renderer_create(struct vn_instance *instance,
                   const VkAllocationCallbacks *alloc,
                   struct vn_renderer **renderer)
{
   if (VN_DEBUG(VTEST)) {
      VkResult result = vn_renderer_create_vtest(instance, alloc, renderer);
      if (result == VK_SUCCESS)
         return VK_SUCCESS;
   }

   return vn_renderer_create_virtgpu(instance, alloc, renderer);
}

static inline void
vn_renderer_destroy(struct vn_renderer *renderer,
                    const VkAllocationCallbacks *alloc)
{
   renderer->ops.destroy(renderer, alloc);
}
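
/*
 * A minimal sketch of the renderer lifecycle (illustrative only; in the
 * driver this is tied to VkInstance creation and destruction):
 *
 *    struct vn_renderer *renderer;
 *    VkResult result = vn_renderer_create(instance, alloc, &renderer);
 *    if (result != VK_SUCCESS)
 *       return result;
 *    ...
 *    vn_renderer_destroy(renderer, alloc);
 */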

static inline VkResult
vn_renderer_submit(struct vn_renderer *renderer,
                   const struct vn_renderer_submit *submit)
{
   return renderer->ops.submit(renderer, submit);
}

static inline VkResult
vn_renderer_wait(struct vn_renderer *renderer,
                 const struct vn_renderer_wait *wait)
{
   return renderer->ops.wait(renderer, wait);
}

static inline struct vn_renderer_shmem *
vn_renderer_shmem_create(struct vn_renderer *renderer, size_t size)
{
   VN_TRACE_FUNC();
   struct vn_renderer_shmem *shmem =
      renderer->shmem_ops.create(renderer, size);
   if (shmem) {
      assert(vn_refcount_is_valid(&shmem->refcount));
      assert(shmem->res_id);
      assert(shmem->mmap_size >= size);
      assert(shmem->mmap_ptr);
   }

   return shmem;
}

static inline struct vn_renderer_shmem *
vn_renderer_shmem_ref(struct vn_renderer *renderer,
                      struct vn_renderer_shmem *shmem)
{
   vn_refcount_inc(&shmem->refcount);
   return shmem;
}

static inline void
vn_renderer_shmem_unref(struct vn_renderer *renderer,
                        struct vn_renderer_shmem *shmem)
{
   if (vn_refcount_dec(&shmem->refcount))
      renderer->shmem_ops.destroy(renderer, shmem);
}
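
/*
 * A minimal sketch of the shmem lifecycle (illustrative only; data and
 * data_size are placeholders): the creator holds the initial reference,
 * other users take theirs with vn_renderer_shmem_ref, and the mapping goes
 * away with the last vn_renderer_shmem_unref.
 *
 *    struct vn_renderer_shmem *shmem =
 *       vn_renderer_shmem_create(renderer, 64 * 1024);
 *    if (!shmem)
 *       return VK_ERROR_OUT_OF_HOST_MEMORY;
 *
 *    memcpy(shmem->mmap_ptr, data, data_size);
 *    ...
 *    vn_renderer_shmem_unref(renderer, shmem);
 */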

static inline VkResult
vn_renderer_bo_create_from_device_memory(
   struct vn_renderer *renderer,
   VkDeviceSize size,
   vn_object_id mem_id,
   VkMemoryPropertyFlags flags,
   VkExternalMemoryHandleTypeFlags external_handles,
   struct vn_renderer_bo **out_bo)
{
   struct vn_renderer_bo *bo;
   VkResult result = renderer->bo_ops.create_from_device_memory(
      renderer, size, mem_id, flags, external_handles, &bo);
   if (result != VK_SUCCESS)
      return result;

   assert(vn_refcount_is_valid(&bo->refcount));
   assert(bo->res_id);
   assert(!bo->mmap_size || bo->mmap_size >= size);

   *out_bo = bo;
   return VK_SUCCESS;
}

static inline VkResult
vn_renderer_bo_create_from_dma_buf(struct vn_renderer *renderer,
                                   VkDeviceSize size,
                                   int fd,
                                   VkMemoryPropertyFlags flags,
                                   struct vn_renderer_bo **out_bo)
{
   struct vn_renderer_bo *bo;
   VkResult result =
      renderer->bo_ops.create_from_dma_buf(renderer, size, fd, flags, &bo);
   if (result != VK_SUCCESS)
      return result;

   assert(vn_refcount_is_valid(&bo->refcount));
   assert(bo->res_id);
   assert(!bo->mmap_size || bo->mmap_size >= size);

   *out_bo = bo;
   return VK_SUCCESS;
}

static inline struct vn_renderer_bo *
vn_renderer_bo_ref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   vn_refcount_inc(&bo->refcount);
   return bo;
}

static inline bool
vn_renderer_bo_unref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   if (vn_refcount_dec(&bo->refcount))
      return renderer->bo_ops.destroy(renderer, bo);
   return false;
}

static inline int
vn_renderer_bo_export_dma_buf(struct vn_renderer *renderer,
                              struct vn_renderer_bo *bo)
{
   return renderer->bo_ops.export_dma_buf(renderer, bo);
}
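
/*
 * A minimal sketch of sharing a bo as a dma-buf, assuming mem_id identifies
 * an exportable VkDeviceMemory on the renderer side and that size, mem_flags
 * and the fd handling are placeholders (illustrative only; error handling
 * omitted):
 *
 *    struct vn_renderer_bo *bo;
 *    vn_renderer_bo_create_from_device_memory(
 *       renderer, size, mem_id, mem_flags,
 *       VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT, &bo);
 *
 *    int fd = vn_renderer_bo_export_dma_buf(renderer, bo);
 *
 *    // the import direction wraps an existing dma-buf fd instead
 *    struct vn_renderer_bo *imported;
 *    vn_renderer_bo_create_from_dma_buf(renderer, size, fd, mem_flags,
 *                                       &imported);
 */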

static inline void *
vn_renderer_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   return renderer->bo_ops.map(renderer, bo);
}

static inline void
vn_renderer_bo_flush(struct vn_renderer *renderer,
                     struct vn_renderer_bo *bo,
                     VkDeviceSize offset,
                     VkDeviceSize size)
{
   renderer->bo_ops.flush(renderer, bo, offset, size);
}

static inline void
vn_renderer_bo_invalidate(struct vn_renderer *renderer,
                          struct vn_renderer_bo *bo,
                          VkDeviceSize offset,
                          VkDeviceSize size)
{
   renderer->bo_ops.invalidate(renderer, bo, offset, size);
}
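
/*
 * A minimal sketch of CPU access to a bo backed by non-coherent memory
 * (illustrative only; bo, offset and size are placeholders): map once,
 * invalidate before reading data written by the device, and flush after
 * writing data the device should see.
 *
 *    void *ptr = vn_renderer_bo_map(renderer, bo);   // not thread-safe
 *    if (!ptr)
 *       return VK_ERROR_MEMORY_MAP_FAILED;
 *
 *    vn_renderer_bo_invalidate(renderer, bo, offset, size);
 *    // read from (char *)ptr + offset
 *
 *    // write to (char *)ptr + offset
 *    vn_renderer_bo_flush(renderer, bo, offset, size);
 */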

static inline VkResult
vn_renderer_sync_create(struct vn_renderer *renderer,
                        uint64_t initial_val,
                        uint32_t flags,
                        struct vn_renderer_sync **out_sync)
{
   return renderer->sync_ops.create(renderer, initial_val, flags, out_sync);
}

static inline VkResult
vn_renderer_sync_create_from_syncobj(struct vn_renderer *renderer,
                                     int fd,
                                     bool sync_file,
                                     struct vn_renderer_sync **out_sync)
{
   return renderer->sync_ops.create_from_syncobj(renderer, fd, sync_file,
                                                 out_sync);
}

static inline void
vn_renderer_sync_destroy(struct vn_renderer *renderer,
                         struct vn_renderer_sync *sync)
{
   renderer->sync_ops.destroy(renderer, sync);
}

static inline int
vn_renderer_sync_export_syncobj(struct vn_renderer *renderer,
                                struct vn_renderer_sync *sync,
                                bool sync_file)
{
   return renderer->sync_ops.export_syncobj(renderer, sync, sync_file);
}

static inline VkResult
vn_renderer_sync_reset(struct vn_renderer *renderer,
                       struct vn_renderer_sync *sync,
                       uint64_t initial_val)
{
   return renderer->sync_ops.reset(renderer, sync, initial_val);
}

static inline VkResult
vn_renderer_sync_read(struct vn_renderer *renderer,
                      struct vn_renderer_sync *sync,
                      uint64_t *val)
{
   return renderer->sync_ops.read(renderer, sync, val);
}

static inline VkResult
vn_renderer_sync_write(struct vn_renderer *renderer,
                       struct vn_renderer_sync *sync,
                       uint64_t val)
{
   return renderer->sync_ops.write(renderer, sync, val);
}
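
/*
 * A minimal sketch of driving a sync counter from the CPU and sharing it as
 * a drm_syncobj, assuming the renderer reports has_external_sync
 * (illustrative only; error handling omitted):
 *
 *    struct vn_renderer_sync *sync;
 *    vn_renderer_sync_create(renderer, 0, VN_RENDERER_SYNC_SHAREABLE, &sync);
 *
 *    vn_renderer_sync_write(renderer, sync, 1);   // counter values only grow
 *
 *    uint64_t val;
 *    vn_renderer_sync_read(renderer, sync, &val);
 *
 *    int fd = vn_renderer_sync_export_syncobj(renderer, sync, false);
 *
 *    vn_renderer_sync_reset(renderer, sync, 0);   // start a new timeline
 *    vn_renderer_sync_destroy(renderer, sync);
 */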

#endif /* VN_RENDERER_H */