/*
 * Copyright © 2024 Igalia S.L.
 * SPDX-License-Identifier: MIT
 */

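/* Memory tracing support for RMV (Radeon Memory Visualizer) captures:
 * allocations, binds, CPU maps and resource lifetime events are forwarded to
 * the common vk_rmv token stream. Tracing is typically enabled through the
 * common Vulkan runtime (e.g. MESA_VK_TRACE=rmv).
 */
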
#include "tu_rmv.h"

#include "tu_buffer.h"
#include "tu_cmd_buffer.h"
#include "tu_cs.h"
#include "tu_device.h"
#include "tu_event.h"
#include "tu_image.h"
#include "tu_queue.h"
#include "tu_query_pool.h"

#include <cinttypes> /* PRIx64 */
#include <cstdio>

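/* Installed as the device's capture callback; dumps the accumulated RMV token
 * stream for the given queue's device.
 */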
static VkResult
capture_trace(VkQueue _queue)
{
   VK_FROM_HANDLE(tu_queue, queue, _queue);
   struct tu_device *device = queue->device;
   assert(device->vk.memory_trace_data.is_enabled);

   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   vk_dump_rmv_capture(&queue->device->vk.memory_trace_data);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
   return VK_SUCCESS;
}

static void
tu_rmv_fill_device_info(struct tu_device *device,
                        struct vk_rmv_device_info *info)
{
   struct tu_physical_device *physical_device = device->physical_device;

   /* Turnip backends only set up a single device-local heap. When available,
    * the kernel-provided VA range is used; otherwise we fall back to that
    * heap's calculated size.
    */
   struct vk_rmv_memory_info *device_local_memory_info =
      &info->memory_infos[VK_RMV_MEMORY_LOCATION_DEVICE];
   if (physical_device->has_set_iova) {
      *device_local_memory_info = {
         .size = physical_device->va_size,
         .physical_base_address = physical_device->va_start,
      };
   } else {
      *device_local_memory_info = {
         .size = physical_device->heap.size, .physical_base_address = 0,
      };
   }

   info->memory_infos[VK_RMV_MEMORY_LOCATION_DEVICE_INVISIBLE] = {
      .size = 0, .physical_base_address = 0,
   };
   info->memory_infos[VK_RMV_MEMORY_LOCATION_HOST] = {
      .size = 0, .physical_base_address = 0,
   };

   /* No PCIe information to provide. Instead, we can include the device's
    * chip ID in the device name string.
    */
   snprintf(info->device_name, sizeof(info->device_name), "%s (0x%" PRIx64 ")",
      physical_device->name, physical_device->dev_id.chip_id);
   info->pcie_family_id = info->pcie_revision_id = info->pcie_device_id = 0;

   /* TODO: provide relevant information here. */
   info->vram_type = VK_RMV_MEMORY_TYPE_LPDDR5;
   info->vram_operations_per_clock = info->vram_bus_width = info->vram_bandwidth = 1;
   info->minimum_shader_clock = info->minimum_memory_clock = 0;
   info->maximum_shader_clock = info->maximum_memory_clock = 1;
}

void
tu_memory_trace_init(struct tu_device *device)
{
   struct vk_rmv_device_info info;
   memset(&info, 0, sizeof(info));
   tu_rmv_fill_device_info(device, &info);

   vk_memory_trace_init(&device->vk, &info);
   if (!device->vk.memory_trace_data.is_enabled)
      return;

   device->vk.capture_trace = capture_trace;
}

void
tu_memory_trace_finish(struct tu_device *device)
{
   vk_memory_trace_finish(&device->vk);
}

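/* The *_locked helpers below assume the caller already holds
 * memory_trace_data.token_mtx. Resource IDs are derived from the resource's
 * CPU pointer by the common vk_rmv code.
 */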
static inline uint32_t
tu_rmv_get_resource_id_locked(struct tu_device *device, const void *resource)
{
   return vk_rmv_get_resource_id_locked(&device->vk, (uint64_t) resource);
}

static inline void
tu_rmv_destroy_resource_id_locked(struct tu_device *device,
                                  const void *resource)
{
   vk_rmv_destroy_resource_id_locked(&device->vk, (uint64_t) resource);
}

static inline void
tu_rmv_emit_resource_bind_locked(struct tu_device *device, uint32_t resource_id,
                                 uint64_t address, uint64_t size)
{
   struct vk_rmv_resource_bind_token token = {
      .address = address,
      .size = size,
      .is_system_memory = false,
      .resource_id = resource_id,
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_BIND, &token);
}

static inline void
tu_rmv_emit_cpu_map_locked(struct tu_device *device, uint64_t address,
                           bool unmapped)
{
   struct vk_rmv_cpu_map_token token = {
      .address = address,
      .unmapped = unmapped,
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_CPU_MAP, &token);
}

static inline void
tu_rmv_emit_page_table_update_locked(struct tu_device *device, struct tu_bo *bo,
                                     bool is_unmap)
{
   /* These tokens are mainly useful for RMV to properly associate buffer
    * allocations and deallocations with a specific memory domain.
    */
   struct vk_rmv_page_table_update_token token = {
      .virtual_address = bo->iova,
      .physical_address = bo->iova,
      .page_count = DIV_ROUND_UP(bo->size, 4096),
      .page_size = 4096,
      .pid = 0,
      .is_unmap = is_unmap,
      .type = VK_RMV_PAGE_TABLE_UPDATE_TYPE_UPDATE,
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_PAGE_TABLE_UPDATE, &token);
}

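/* vkAllocateMemory allocations are reported as HEAP resources and immediately
 * bound to the address range of their backing BO.
 */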
void
tu_rmv_log_heap_create(struct tu_device *device,
                       const VkMemoryAllocateInfo *allocate_info,
                       struct tu_device_memory *device_memory)
{
   const VkMemoryAllocateFlagsInfo *flags_info = vk_find_struct_const(
      allocate_info->pNext, MEMORY_ALLOCATE_FLAGS_INFO);

   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, device_memory),
      .is_driver_internal = false,
      .type = VK_RMV_RESOURCE_TYPE_HEAP,
      .heap = {
         .alloc_flags = flags_info ? flags_info->flags : 0,
         .size = device_memory->bo->size,
         .alignment = 4096,
         .heap_index = VK_RMV_MEMORY_LOCATION_DEVICE,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   tu_rmv_emit_resource_bind_locked(device, token.resource_id,
                                    device_memory->bo->iova,
                                    device_memory->bo->size);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

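/* BO allocation and destruction are reported both as page table updates and
 * as virtual allocate/free events on the BO's IOVA range; CPU maps and unmaps
 * are reported against the same range.
 */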
void
tu_rmv_log_bo_allocate(struct tu_device *device, struct tu_bo *bo)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   tu_rmv_emit_page_table_update_locked(device, bo, false);

   struct vk_rmv_virtual_allocate_token virtual_allocate_token = {
      .page_count = DIV_ROUND_UP(bo->size, 4096),
      .is_driver_internal = false,
      .is_in_invisible_vram = false,
      .address = bo->iova,
      .preferred_domains = VK_RMV_KERNEL_MEMORY_DOMAIN_VRAM,
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_VIRTUAL_ALLOCATE,
                     &virtual_allocate_token);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_bo_destroy(struct tu_device *device, struct tu_bo *bo)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_virtual_free_token virtual_free_token = {
      .address = bo->iova,
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_VIRTUAL_FREE, &virtual_free_token);

   tu_rmv_emit_page_table_update_locked(device, bo, true);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_bo_map(struct tu_device *device, struct tu_bo *bo)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   tu_rmv_emit_cpu_map_locked(device, bo->iova, false);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_bo_unmap(struct tu_device *device, struct tu_bo *bo)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   tu_rmv_emit_cpu_map_locked(device, bo->iova, true);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_buffer_create(struct tu_device *device, struct tu_buffer *buffer)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, buffer),
      .is_driver_internal = false,
      .type = VK_RMV_RESOURCE_TYPE_BUFFER,
      .buffer = {
         .create_flags = buffer->vk.create_flags,
         .usage_flags = buffer->vk.usage,
         .size = buffer->vk.size,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   /* Any sparse data would also be reported here, if supported. */

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_buffer_destroy(struct tu_device *device, struct tu_buffer *buffer)
{
   /* Any sparse data would also be reported here, if supported. */
   tu_rmv_log_resource_destroy(device, buffer);
}

void
tu_rmv_log_buffer_bind(struct tu_device *device, struct tu_buffer *buffer)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   tu_rmv_emit_resource_bind_locked(device,
                                    tu_rmv_get_resource_id_locked(device, buffer),
                                    buffer->bo ? buffer->iova : 0,
                                    buffer->vk.size);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_image_create(struct tu_device *device, struct tu_image *image)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   /* TODO: provide the image metadata information */
   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, image),
      .is_driver_internal = false,
      .type = VK_RMV_RESOURCE_TYPE_IMAGE,
      .image = {
         .create_flags = image->vk.create_flags,
         .usage_flags = image->vk.usage,
         .type = image->vk.image_type,
         .extent = image->vk.extent,
         .format = image->vk.format,
         .num_mips = image->vk.mip_levels,
         .num_slices = image->vk.array_layers,
         .tiling = image->vk.tiling,
         .log2_samples = util_logbase2(image->vk.samples),
         .log2_storage_samples = util_logbase2(image->vk.samples),
         /* any bound memory should have alignment of 4096 */
         .alignment_log2 = util_logbase2(4096),
         .metadata_alignment_log2 = 0,
         .image_alignment_log2 = util_logbase2(image->layout[0].base_align),
         .size = image->total_size,
         .metadata_size = 0,
         .metadata_header_size = 0,
         .metadata_offset = 0,
         .metadata_header_offset = 0,
         /* TODO: find a better way to determine if an image is presentable */
         .presentable = image->vk.usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   /* Any sparse data would also be reported here, if supported. */

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_image_destroy(struct tu_device *device, struct tu_image *image)
{
   /* Any sparse data would also be reported here, if supported. */
   tu_rmv_log_resource_destroy(device, image);
}

void
tu_rmv_log_image_bind(struct tu_device *device, struct tu_image *image)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   uint64_t address = image->bo ? image->iova : 0;
   uint64_t size = image->bo ? image->total_size : 0;
   tu_rmv_emit_resource_bind_locked(device,
                                    tu_rmv_get_resource_id_locked(device, image),
                                    address, size);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

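/* Command buffer backing BOs, both whole BOs and sub-allocations, are
 * reported as driver-internal COMMAND_ALLOCATOR resources.
 */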
static inline void
tu_rmv_log_command_allocator_create(struct tu_device *device, void *bo,
                                    uint64_t address, uint64_t size)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, bo),
      .is_driver_internal = true,
      .type = VK_RMV_RESOURCE_TYPE_COMMAND_ALLOCATOR,
      .command_buffer = {
         .preferred_domain = VK_RMV_KERNEL_MEMORY_DOMAIN_VRAM,
         .executable_size = size,
         .app_available_executable_size = size,
         .embedded_data_size = 0,
         .app_available_embedded_data_size = 0,
         .scratch_size = 0,
         .app_available_scratch_size = 0,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   tu_rmv_emit_resource_bind_locked(device, token.resource_id, address, size);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_cmd_buffer_bo_create(struct tu_device *device,
                                struct tu_bo *bo)
{
   tu_rmv_log_command_allocator_create(device, bo, bo->iova, bo->size);
}

void
tu_rmv_log_cmd_buffer_suballoc_bo_create(struct tu_device *device,
                                         struct tu_suballoc_bo *suballoc_bo)
{
   tu_rmv_log_command_allocator_create(device, suballoc_bo,
                                       suballoc_bo->iova, suballoc_bo->size);
}

void
tu_rmv_log_query_pool_create(struct tu_device *device,
                             struct tu_query_pool *query_pool)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, query_pool),
      .is_driver_internal = false,
      .type = VK_RMV_RESOURCE_TYPE_QUERY_HEAP,
      .query_pool = {
         .type = query_pool->vk.query_type,
         .has_cpu_access = true,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   tu_rmv_emit_resource_bind_locked(device, token.resource_id,
                                    query_pool->bo->iova, query_pool->bo->size);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

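/* A copy of the pool sizes array is attached to the token and handed off to
 * the common RMV code; it is not freed here.
 */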
void
tu_rmv_log_descriptor_pool_create(struct tu_device *device,
                                  const VkDescriptorPoolCreateInfo *create_info,
                                  struct tu_descriptor_pool *descriptor_pool)
{
   size_t pool_sizes_size =
      create_info->poolSizeCount * sizeof(VkDescriptorPoolSize);
   VkDescriptorPoolSize *pool_sizes =
      (VkDescriptorPoolSize *) malloc(pool_sizes_size);
   if (!pool_sizes)
      return;

   memcpy(pool_sizes, create_info->pPoolSizes, pool_sizes_size);

   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, descriptor_pool),
      .is_driver_internal = false,
      .type = VK_RMV_RESOURCE_TYPE_DESCRIPTOR_POOL,
      .descriptor_pool = {
         .max_sets = create_info->maxSets,
         .pool_size_count = create_info->poolSizeCount,
         .pool_sizes = pool_sizes,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   if (descriptor_pool->bo) {
      tu_rmv_emit_resource_bind_locked(device, token.resource_id,
                                       descriptor_pool->bo->iova,
                                       descriptor_pool->bo->size);
   }

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

static inline void
tu_rmv_log_pipeline_create(struct tu_device *device,
                           struct tu_pipeline *pipeline)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, pipeline),
      .is_driver_internal = false,
      .type = VK_RMV_RESOURCE_TYPE_PIPELINE,
      .pipeline = {
         .is_internal = false,
         /* TODO: provide pipeline hash data when available. */
         .hash_lo = 0, .hash_hi = 0,
         .shader_stages = pipeline->active_stages,
         .is_ngg = false,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   if (pipeline->bo.bo) {
      tu_rmv_emit_resource_bind_locked(device, token.resource_id,
                                       pipeline->bo.iova, pipeline->bo.size);
   }

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_graphics_pipeline_create(struct tu_device *device,
                                    struct tu_graphics_pipeline *graphics_pipeline)
{
   tu_rmv_log_pipeline_create(device, &graphics_pipeline->base);
}

void
tu_rmv_log_compute_pipeline_create(struct tu_device *device,
                                   struct tu_compute_pipeline *compute_pipeline)
{
   tu_rmv_log_pipeline_create(device, &compute_pipeline->base);
}

void
tu_rmv_log_event_create(struct tu_device *device,
                        const VkEventCreateInfo *create_info,
                        struct tu_event *event)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, event),
      .is_driver_internal = false,
      .type = VK_RMV_RESOURCE_TYPE_GPU_EVENT,
      .event = {
         .flags = create_info->flags,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   if (event->bo) {
      tu_rmv_emit_resource_bind_locked(device, token.resource_id,
                                       event->bo->iova, event->bo->size);
   }

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

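/* Driver-internal BOs that have no more specific resource type are reported
 * as MISC_INTERNAL resources.
 */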
void
tu_rmv_log_internal_resource_create(struct tu_device *device, struct tu_bo *bo)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, bo),
      .is_driver_internal = true,
      .type = VK_RMV_RESOURCE_TYPE_MISC_INTERNAL,
      .misc_internal = {
         .type = VK_RMV_MISC_INTERNAL_TYPE_PADDING,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   tu_rmv_emit_resource_bind_locked(device, token.resource_id,
                                    bo->iova, bo->size);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

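/* Resource names are duplicated (truncated to 128 bytes, including the
 * terminator) and attached to the resource through a USERDATA token; the copy
 * is not freed here.
 */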
void
tu_rmv_log_resource_name(struct tu_device *device, const void *resource,
                         const char *resource_name)
{
   size_t name_len = MIN2(strlen(resource_name) + 1, 128);
   char *name_buf = (char *) malloc(name_len);
   if (!name_buf)
      return;

   strncpy(name_buf, resource_name, name_len);
   name_buf[name_len - 1] = '\0';

   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_userdata_token token = {
      .name = name_buf,
      .resource_id = tu_rmv_get_resource_id_locked(device, resource)
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_USERDATA, &token);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_resource_destroy(struct tu_device *device, const void *resource)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_destroy_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, resource),
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_DESTROY, &token);

   tu_rmv_destroy_resource_id_locked(device, resource);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}