/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#ifndef VN_INSTANCE_H
#define VN_INSTANCE_H

#include "vn_common.h"

#include "venus-protocol/vn_protocol_driver_defines.h"

#include "vn_cs.h"
#include "vn_renderer.h"
#include "vn_renderer_util.h"

/* require and request at least Vulkan 1.1 at both instance and device levels
 */
#define VN_MIN_RENDERER_VERSION VK_API_VERSION_1_1

/* max advertised version at both instance and device levels */
#if defined(ANDROID_STRICT) && ANDROID_API_LEVEL < 33
#define VN_MAX_API_VERSION VK_MAKE_VERSION(1, 1, VK_HEADER_VERSION)
#else
#define VN_MAX_API_VERSION VK_MAKE_VERSION(1, 3, VK_HEADER_VERSION)
#endif

struct vn_instance {
   struct vn_instance_base base;

   struct driOptionCache dri_options;
   struct driOptionCache available_dri_options;
   bool enable_wsi_multi_plane_modifiers;

   struct vn_renderer *renderer;

   /* for VN_CS_ENCODER_STORAGE_SHMEM_POOL */
   struct vn_renderer_shmem_pool cs_shmem_pool;

   struct vn_renderer_shmem_pool reply_shmem_pool;

   mtx_t ring_idx_mutex;
   uint64_t ring_idx_used_mask;

   struct {
      struct vn_ring *ring;
      struct list_head tls_rings;

      /* to synchronize renderer/ring */
      mtx_t roundtrip_mutex;
      uint64_t roundtrip_next;

      struct vn_watchdog watchdog;
   } ring;

   /* Between the driver and the app, VN_MAX_API_VERSION is what we advertise
    * and base.base.app_info.api_version is what the app requests.
    *
    * Between the driver and the renderer, renderer_api_version is the api
    * version we request internally, which can be higher than
    * base.base.app_info.api_version. renderer_version is the instance
    * version we can use internally.
    */
   uint32_t renderer_api_version;
   uint32_t renderer_version;

   bool engine_is_zink;

   struct {
      mtx_t mutex;
      bool initialized;

      struct vn_physical_device *devices;
      uint32_t device_count;
      VkPhysicalDeviceGroupProperties *groups;
      uint32_t group_count;
   } physical_device;
};
VK_DEFINE_HANDLE_CASTS(vn_instance,
                       base.base.base,
                       VkInstance,
                       VK_OBJECT_TYPE_INSTANCE)
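
/* Illustrative note: VK_DEFINE_HANDLE_CASTS() above is expected to generate
 * vn_instance_from_handle() and vn_instance_to_handle() helpers for casting
 * between VkInstance and struct vn_instance *, so entrypoints can begin with
 * something like (sketch, not part of this header):
 *
 *    struct vn_instance *instance = vn_instance_from_handle(vkInstance);
 */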

VkResult
vn_instance_submit_roundtrip(struct vn_instance *instance,
                             uint64_t *roundtrip_seqno);

void
vn_instance_wait_roundtrip(struct vn_instance *instance,
                           uint64_t roundtrip_seqno);

static inline void
vn_instance_roundtrip(struct vn_instance *instance)
{
   uint64_t roundtrip_seqno;
   if (vn_instance_submit_roundtrip(instance, &roundtrip_seqno) == VK_SUCCESS)
      vn_instance_wait_roundtrip(instance, roundtrip_seqno);
}
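
/* Usage sketch (illustrative only): a caller that can overlap CPU work with
 * the renderer may split the roundtrip into submit and wait, e.g.
 *
 *    uint64_t seqno;
 *    if (vn_instance_submit_roundtrip(instance, &seqno) == VK_SUCCESS) {
 *       // ... do unrelated CPU work here ...
 *       vn_instance_wait_roundtrip(instance, seqno);
 *    }
 *
 * vn_instance_roundtrip() above is the blocking shorthand for that pattern.
 */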

static inline struct vn_renderer_shmem *
vn_instance_cs_shmem_alloc(struct vn_instance *instance,
                           size_t size,
                           size_t *out_offset)
{
   return vn_renderer_shmem_pool_alloc(
      instance->renderer, &instance->cs_shmem_pool, size, out_offset);
}

static inline struct vn_renderer_shmem *
vn_instance_reply_shmem_alloc(struct vn_instance *instance,
                              size_t size,
                              size_t *out_offset)
{
   return vn_renderer_shmem_pool_alloc(
      instance->renderer, &instance->reply_shmem_pool, size, out_offset);
}
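
/* Suballocation sketch (illustrative; assumes struct vn_renderer_shmem
 * exposes its CPU mapping via mmap_ptr, per vn_renderer.h): the pool returns
 * a shmem plus an offset into it, and the caller addresses its slice as
 *
 *    size_t offset;
 *    struct vn_renderer_shmem *shmem =
 *       vn_instance_cs_shmem_alloc(instance, size, &offset);
 *    if (shmem) {
 *       void *ptr = (char *)shmem->mmap_ptr + offset;
 *       // ... encode command stream data at ptr ...
 *    }
 */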

static inline int
vn_instance_acquire_ring_idx(struct vn_instance *instance)
{
   mtx_lock(&instance->ring_idx_mutex);
   int ring_idx = ffsll(~instance->ring_idx_used_mask) - 1;
   if (ring_idx >= instance->renderer->info.max_timeline_count)
      ring_idx = -1;
   if (ring_idx > 0)
      instance->ring_idx_used_mask |= (1ULL << (uint32_t)ring_idx);
   mtx_unlock(&instance->ring_idx_mutex);

   assert(ring_idx); /* never acquire the dedicated CPU ring */

   /* returns -1 when no vacant rings */
   return ring_idx;
}

static inline void
vn_instance_release_ring_idx(struct vn_instance *instance, uint32_t ring_idx)
{
   assert(ring_idx > 0);

   mtx_lock(&instance->ring_idx_mutex);
   assert(instance->ring_idx_used_mask & (1ULL << ring_idx));
   instance->ring_idx_used_mask &= ~(1ULL << ring_idx);
   mtx_unlock(&instance->ring_idx_mutex);
}
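
/* Pairing sketch (illustrative only): every successfully acquired ring index
 * must eventually be released, e.g.
 *
 *    int ring_idx = vn_instance_acquire_ring_idx(instance);
 *    if (ring_idx < 0)
 *       return VK_ERROR_INITIALIZATION_FAILED;
 *    // ... use ring_idx for renderer timeline submissions ...
 *    vn_instance_release_ring_idx(instance, (uint32_t)ring_idx);
 */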

#endif /* VN_INSTANCE_H */