/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_common.h"

#include <stdarg.h>

#include "util/log.h"
#include "util/os_misc.h"
#include "util/u_debug.h"
#include "venus-protocol/vn_protocol_driver_info.h"
#include "vk_enum_to_str.h"

#include "vn_instance.h"
#include "vn_ring.h"

#define VN_RELAX_MIN_BASE_SLEEP_US (160)

static const struct debug_control vn_debug_options[] = {
   /* clang-format off */
   { "init", VN_DEBUG_INIT },
   { "result", VN_DEBUG_RESULT },
   { "vtest", VN_DEBUG_VTEST },
   { "wsi", VN_DEBUG_WSI },
   { "no_abort", VN_DEBUG_NO_ABORT },
   { "log_ctx_info", VN_DEBUG_LOG_CTX_INFO },
   { "cache", VN_DEBUG_CACHE },
   { "no_sparse", VN_DEBUG_NO_SPARSE },
   { "no_gpl", VN_DEBUG_NO_GPL },
   { NULL, 0 },
   /* clang-format on */
};

static const struct debug_control vn_perf_options[] = {
   /* clang-format off */
   { "no_async_set_alloc", VN_PERF_NO_ASYNC_SET_ALLOC },
   { "no_async_buffer_create", VN_PERF_NO_ASYNC_BUFFER_CREATE },
   { "no_async_queue_submit", VN_PERF_NO_ASYNC_QUEUE_SUBMIT },
   { "no_event_feedback", VN_PERF_NO_EVENT_FEEDBACK },
   { "no_fence_feedback", VN_PERF_NO_FENCE_FEEDBACK },
   { "no_memory_suballoc", VN_PERF_NO_MEMORY_SUBALLOC },
   { "no_cmd_batching", VN_PERF_NO_CMD_BATCHING },
   { "no_semaphore_feedback", VN_PERF_NO_SEMAPHORE_FEEDBACK },
   { "no_query_feedback", VN_PERF_NO_QUERY_FEEDBACK },
   { "no_async_mem_alloc", VN_PERF_NO_ASYNC_MEM_ALLOC },
   { "no_tiled_wsi_image", VN_PERF_NO_TILED_WSI_IMAGE },
   { "no_multi_ring", VN_PERF_NO_MULTI_RING },
   { "no_async_image_create", VN_PERF_NO_ASYNC_IMAGE_CREATE },
   { "no_async_image_format", VN_PERF_NO_ASYNC_IMAGE_FORMAT },
   { NULL, 0 },
   /* clang-format on */
};

uint64_t vn_next_obj_id = 1;
struct vn_env vn_env;

static void
vn_env_init_once(void)
{
   vn_env.debug =
      parse_debug_string(os_get_option("VN_DEBUG"), vn_debug_options);
   vn_env.perf =
      parse_debug_string(os_get_option("VN_PERF"), vn_perf_options);
   vn_env.draw_cmd_batch_limit =
      debug_get_num_option("VN_DRAW_CMD_BATCH_LIMIT", UINT32_MAX);
   if (!vn_env.draw_cmd_batch_limit)
      vn_env.draw_cmd_batch_limit = UINT32_MAX;
   vn_env.relax_base_sleep_us = debug_get_num_option(
      "VN_RELAX_BASE_SLEEP_US", VN_RELAX_MIN_BASE_SLEEP_US);
}
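
/* An illustrative invocation (a sketch, not from this file): both
 * variables take comma-separated option lists parsed by
 * parse_debug_string(), so something like
 *
 *    VN_DEBUG=init,wsi VN_PERF=no_multi_ring <some-vulkan-app>
 *
 * would OR VN_DEBUG_INIT | VN_DEBUG_WSI into vn_env.debug and
 * VN_PERF_NO_MULTI_RING into vn_env.perf before the first VkInstance is
 * created.
 */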

void
vn_env_init(void)
{
   static once_flag once = ONCE_FLAG_INIT;
   call_once(&once, vn_env_init_once);

   /* log per VkInstance creation */
   if (VN_DEBUG(INIT)) {
      vn_log(NULL,
             "vn_env is as below:"
             "\n\tdebug = 0x%" PRIx64 ""
             "\n\tperf = 0x%" PRIx64 ""
             "\n\tdraw_cmd_batch_limit = %u"
             "\n\trelax_base_sleep_us = %u",
             vn_env.debug, vn_env.perf, vn_env.draw_cmd_batch_limit,
             vn_env.relax_base_sleep_us);
   }
}

void
vn_trace_init(void)
{
#if DETECT_OS_ANDROID
   atrace_init();
#else
   util_cpu_trace_init();
#endif
}

void
vn_log(struct vn_instance *instance, const char *format, ...)
{
   va_list ap;

   va_start(ap, format);
   mesa_log_v(MESA_LOG_DEBUG, "MESA-VIRTIO", format, ap);
   va_end(ap);

   /* instance may be NULL or partially initialized */
}

VkResult
vn_log_result(struct vn_instance *instance,
              VkResult result,
              const char *where)
{
   vn_log(instance, "%s: %s", where, vk_Result_to_str(result));
   return result;
}

uint32_t
vn_extension_get_spec_version(const char *name)
{
   const int32_t index = vn_info_extension_index(name);
   return index >= 0 ? vn_info_extension_get(index)->spec_version : 0;
}

static inline bool
vn_watchdog_timeout(const struct vn_watchdog *watchdog)
{
   return !watchdog->alive;
}

static inline void
vn_watchdog_release(struct vn_watchdog *watchdog)
{
   if (vn_gettid() == watchdog->tid) {
      watchdog->tid = 0;
      mtx_unlock(&watchdog->mutex);
   }
}

static bool
vn_watchdog_acquire(struct vn_watchdog *watchdog, bool alive)
{
   pid_t tid = vn_gettid();
   if (!watchdog->tid && tid != watchdog->tid &&
       mtx_trylock(&watchdog->mutex) == thrd_success) {
      /* register as the only waiting thread that monitors the ring. */
      watchdog->tid = tid;
   }

   if (tid != watchdog->tid)
      return false;

   watchdog->alive = alive;
   return true;
}
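
/* Summary of the liveness handshake (inferred from the code here and in
 * vn_relax() below): the thread that wins vn_watchdog_acquire() becomes
 * the sole ring monitor. It clears VK_RING_STATUS_ALIVE_BIT_MESA and
 * expects the ring to set the bit again while it stays alive. At each
 * warn interval, vn_relax() samples the status: if the bit is still
 * unset, watchdog->alive turns false, vn_watchdog_timeout() reports a
 * timeout, and the driver aborts unless VN_DEBUG=no_abort is set.
 */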

void
vn_relax_fini(struct vn_relax_state *state)
{
   vn_watchdog_release(&state->instance->ring.watchdog);
}

struct vn_relax_state
vn_relax_init(struct vn_instance *instance, const char *reason)
{
   struct vn_ring *ring = instance->ring.ring;
   struct vn_watchdog *watchdog = &instance->ring.watchdog;
   if (vn_watchdog_acquire(watchdog, true))
      vn_ring_unset_status_bits(ring, VK_RING_STATUS_ALIVE_BIT_MESA);

   return (struct vn_relax_state){
      .instance = instance,
      .iter = 0,
      .reason = reason,
   };
}
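
/* Typical wait-loop shape (a sketch; condition_met() and the reason
 * string are placeholders for whatever the caller actually polls):
 *
 *    struct vn_relax_state state = vn_relax_init(instance, "example wait");
 *    while (!condition_met())
 *       vn_relax(&state);
 *    vn_relax_fini(&state);
 */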

void
vn_relax(struct vn_relax_state *state)
{
   uint32_t *iter = &state->iter;
   const char *reason = state->reason;

   /* Yield for the first 2^busy_wait_order times and then sleep for
    * base_sleep_us microseconds for the same number of times. After that,
    * keep doubling both sleep length and count.
    * Must also update pre-calculated "first_warn_time" in vn_relax_init().
    */
   const uint32_t busy_wait_order = 8;
   const uint32_t base_sleep_us = vn_env.relax_base_sleep_us;
   const uint32_t warn_order = 12;
   const uint32_t abort_order = 16;
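   /* Concretely, with the default base sleep of 160us
    * (VN_RELAX_MIN_BASE_SLEEP_US) and the orders above, the schedule
    * works out to (an illustrative trace, not logged output):
    *
    *    iter 1..255     : thrd_yield()
    *    iter 256..511   : sleep 160us each
    *    iter 512..1023  : sleep 320us each
    *    iter 1024..2047 : sleep 640us each
    *    ... each power-of-two window doubles the sleep ...
    *    iter % 4096 == 0: warn and check the ring status
    *    iter >= 65536   : abort unless VN_DEBUG=no_abort
    */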

   (*iter)++;
   if (*iter < (1 << busy_wait_order)) {
      thrd_yield();
      return;
   }

   /* warn occasionally if we have slept at least 1.28ms for 2048 times (plus
    * another 2047 shorter sleeps)
    */
   if (unlikely(*iter % (1 << warn_order) == 0)) {
      struct vn_instance *instance = state->instance;
      vn_log(instance, "stuck in %s wait with iter at %d", reason, *iter);

      struct vn_ring *ring = instance->ring.ring;
      const uint32_t status = vn_ring_load_status(ring);
      if (status & VK_RING_STATUS_FATAL_BIT_MESA) {
         vn_log(instance, "aborting on ring fatal error at iter %d", *iter);
         abort();
      }

      struct vn_watchdog *watchdog = &instance->ring.watchdog;
      const bool alive = status & VK_RING_STATUS_ALIVE_BIT_MESA;
      if (vn_watchdog_acquire(watchdog, alive))
         vn_ring_unset_status_bits(ring, VK_RING_STATUS_ALIVE_BIT_MESA);

      if (vn_watchdog_timeout(watchdog) && !VN_DEBUG(NO_ABORT)) {
         vn_log(instance, "aborting on expired ring alive status at iter %d",
                *iter);
         abort();
      }

      if (*iter >= (1 << abort_order) && !VN_DEBUG(NO_ABORT)) {
         vn_log(instance, "aborting");
         abort();
      }
   }

   const uint32_t shift = util_last_bit(*iter) - busy_wait_order - 1;
   os_time_sleep(base_sleep_us << shift);
}

struct vn_ring *
vn_tls_get_ring(struct vn_instance *instance)
{
   if (VN_PERF(NO_MULTI_RING))
      return instance->ring.ring;

   struct vn_tls *tls = vn_tls_get();
   if (unlikely(!tls)) {
      /* only fall back to the instance ring when tls is missing */
      return instance->ring.ring;
   }

   /* look up the tls_ring owned by this instance */
   list_for_each_entry(struct vn_tls_ring, tls_ring, &tls->tls_rings,
                       tls_head) {
      mtx_lock(&tls_ring->mutex);
      if (tls_ring->instance == instance) {
         mtx_unlock(&tls_ring->mutex);
         assert(tls_ring->ring);
         return tls_ring->ring;
      }
      mtx_unlock(&tls_ring->mutex);
   }

   struct vn_tls_ring *tls_ring = calloc(1, sizeof(*tls_ring));
   if (!tls_ring)
      return NULL;

   /* keep the extra for potential roundtrip sync on the tls ring */
   static const size_t extra_size = sizeof(uint32_t);

   /* only need a small ring for synchronous cmds on the tls ring */
   static const size_t buf_size = 16 * 1024;

   /* a single cmd can use the entire ring shmem on the tls ring */
   static const uint8_t direct_order = 0;

   struct vn_ring_layout layout;
   vn_ring_get_layout(buf_size, extra_size, &layout);

   tls_ring->ring = vn_ring_create(instance, &layout, direct_order);
   if (!tls_ring->ring) {
      free(tls_ring);
      return NULL;
   }

   mtx_init(&tls_ring->mutex, mtx_plain);
   tls_ring->instance = instance;
   list_add(&tls_ring->tls_head, &tls->tls_rings);
   list_add(&tls_ring->vk_head, &instance->ring.tls_rings);

   return tls_ring->ring;
}

void
vn_tls_destroy_ring(struct vn_tls_ring *tls_ring)
{
   mtx_lock(&tls_ring->mutex);
   if (tls_ring->ring) {
      vn_ring_destroy(tls_ring->ring);
      tls_ring->ring = NULL;
      tls_ring->instance = NULL;
      mtx_unlock(&tls_ring->mutex);
   } else {
      mtx_unlock(&tls_ring->mutex);
      mtx_destroy(&tls_ring->mutex);
      free(tls_ring);
   }
}
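
/* Note on the two-phase teardown above (an inferred design note): when
 * tls_ring->ring is still set, the caller is the instance-destroy path,
 * which destroys the ring but leaves the struct alive for the owning
 * thread; when the ring is already gone, the caller is the thread-exit
 * path (vn_tls_free() below), which destroys the mutex and frees the
 * struct. This way neither side frees memory the other may still walk
 * through its list.
 */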

static void
vn_tls_free(void *tls)
{
   if (tls) {
      list_for_each_entry_safe(struct vn_tls_ring, tls_ring,
                               &((struct vn_tls *)tls)->tls_rings, tls_head)
         vn_tls_destroy_ring(tls_ring);
   }
   free(tls);
}

static tss_t vn_tls_key;
static bool vn_tls_key_valid;

static void
vn_tls_key_create_once(void)
{
   vn_tls_key_valid = tss_create(&vn_tls_key, vn_tls_free) == thrd_success;
   if (!vn_tls_key_valid && VN_DEBUG(INIT))
      vn_log(NULL, "WARNING: failed to create vn_tls_key");
}
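
/* A minimal usage sketch for vn_tls_get() (illustrative only):
 *
 *    struct vn_tls *tls = vn_tls_get();
 *    if (tls)
 *       tls->async_pipeline_create = true;
 *
 * Callers must tolerate a NULL return and fall back to non-tls behavior,
 * as vn_tls_get_ring() does above.
 */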

struct vn_tls *
vn_tls_get(void)
{
   static once_flag once = ONCE_FLAG_INIT;
   call_once(&once, vn_tls_key_create_once);
   if (unlikely(!vn_tls_key_valid))
      return NULL;

   struct vn_tls *tls = tss_get(vn_tls_key);
   if (likely(tls))
      return tls;

   tls = calloc(1, sizeof(*tls));
   if (!tls)
      return NULL;

   /* initialize tls */
   tls->async_pipeline_create = false;
   list_inithead(&tls->tls_rings);

   if (tss_set(vn_tls_key, tls) != thrd_success) {
      free(tls);
      return NULL;
   }

   return tls;
}