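/* Buffer meta operations for RADV: fill, copy and inline update, implemented
 * either with small compute shaders or with CP DMA depending on size and
 * memory placement.
 */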
#include "nir/nir_builder.h"
#include "radv_meta.h"

#include "radv_cs.h"
#include "sid.h"

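/* Builds a compute shader that fills a buffer with a 32-bit value read from
 * a push constant: each invocation broadcasts the value to a vec4 and stores
 * 16 bytes, so one 64-wide workgroup covers 1024 bytes.
 */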
static nir_shader *
build_buffer_fill_shader(struct radv_device *dev)
{
   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "meta_buffer_fill");
   b.shader->info.workgroup_size[0] = 64;
   b.shader->info.workgroup_size[1] = 1;
   b.shader->info.workgroup_size[2] = 1;

   nir_ssa_def *global_id = get_global_ids(&b, 1);

   nir_ssa_def *offset = nir_imul(&b, global_id, nir_imm_int(&b, 16));
   offset = nir_channel(&b, offset, 0);

   nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);

   nir_ssa_def *load = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 4);
   nir_ssa_def *swizzled_load = nir_swizzle(&b, load, (unsigned[]){0, 0, 0, 0}, 4);

   nir_store_ssbo(&b, swizzled_load, dst_buf, offset, .write_mask = 0xf,
                  .access = ACCESS_NON_READABLE, .align_mul = 16);

   return b.shader;
}

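/* Builds a compute shader that copies between two SSBOs, one vec4 (16 bytes)
 * per invocation, with the destination at binding 0 and the source at
 * binding 1.
 */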
static nir_shader *
build_buffer_copy_shader(struct radv_device *dev)
{
   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "meta_buffer_copy");
   b.shader->info.workgroup_size[0] = 64;
   b.shader->info.workgroup_size[1] = 1;
   b.shader->info.workgroup_size[2] = 1;

   nir_ssa_def *global_id = get_global_ids(&b, 1);

   nir_ssa_def *offset = nir_imul(&b, global_id, nir_imm_int(&b, 16));
   offset = nir_channel(&b, offset, 0);

   nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
   nir_ssa_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);

   nir_ssa_def *load = nir_load_ssbo(&b, 4, 32, src_buf, offset, .align_mul = 16);
   nir_store_ssbo(&b, load, dst_buf, offset, .write_mask = 0xf, .access = ACCESS_NON_READABLE,
                  .align_mul = 16);

   return b.shader;
}

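/* Creates the descriptor set layouts, pipeline layouts and compute pipelines
 * used by the buffer fill and copy meta operations. Only the fill layout
 * needs a push constant range (4 bytes for the fill value).
 */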
VkResult
radv_device_init_meta_buffer_state(struct radv_device *device)
{
   VkResult result;
   nir_shader *fill_cs = build_buffer_fill_shader(device);
   nir_shader *copy_cs = build_buffer_copy_shader(device);

   VkDescriptorSetLayoutCreateInfo fill_ds_create_info = {
      .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
      .flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
      .bindingCount = 1,
      .pBindings = (VkDescriptorSetLayoutBinding[]){
         {.binding = 0,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
          .descriptorCount = 1,
          .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
          .pImmutableSamplers = NULL},
      }};

   result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device), &fill_ds_create_info,
                                           &device->meta_state.alloc,
                                           &device->meta_state.buffer.fill_ds_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkDescriptorSetLayoutCreateInfo copy_ds_create_info = {
      .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
      .flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
      .bindingCount = 2,
      .pBindings = (VkDescriptorSetLayoutBinding[]){
         {.binding = 0,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
          .descriptorCount = 1,
          .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
          .pImmutableSamplers = NULL},
         {.binding = 1,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
          .descriptorCount = 1,
          .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
          .pImmutableSamplers = NULL},
      }};

   result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device), &copy_ds_create_info,
                                           &device->meta_state.alloc,
                                           &device->meta_state.buffer.copy_ds_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineLayoutCreateInfo fill_pl_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
      .setLayoutCount = 1,
      .pSetLayouts = &device->meta_state.buffer.fill_ds_layout,
      .pushConstantRangeCount = 1,
      .pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 4},
   };

   result = radv_CreatePipelineLayout(radv_device_to_handle(device), &fill_pl_create_info,
                                      &device->meta_state.alloc,
                                      &device->meta_state.buffer.fill_p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineLayoutCreateInfo copy_pl_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
      .setLayoutCount = 1,
      .pSetLayouts = &device->meta_state.buffer.copy_ds_layout,
      .pushConstantRangeCount = 0,
   };

   result = radv_CreatePipelineLayout(radv_device_to_handle(device), &copy_pl_create_info,
                                      &device->meta_state.alloc,
                                      &device->meta_state.buffer.copy_p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineShaderStageCreateInfo fill_pipeline_shader_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(fill_cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   VkComputePipelineCreateInfo fill_vk_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = fill_pipeline_shader_stage,
      .flags = 0,
      .layout = device->meta_state.buffer.fill_p_layout,
   };

   result = radv_CreateComputePipelines(
      radv_device_to_handle(device), radv_pipeline_cache_to_handle(&device->meta_state.cache), 1,
      &fill_vk_pipeline_info, NULL, &device->meta_state.buffer.fill_pipeline);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineShaderStageCreateInfo copy_pipeline_shader_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(copy_cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   VkComputePipelineCreateInfo copy_vk_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = copy_pipeline_shader_stage,
      .flags = 0,
      .layout = device->meta_state.buffer.copy_p_layout,
   };

   result = radv_CreateComputePipelines(
      radv_device_to_handle(device), radv_pipeline_cache_to_handle(&device->meta_state.cache), 1,
      &copy_vk_pipeline_info, NULL, &device->meta_state.buffer.copy_pipeline);
   if (result != VK_SUCCESS)
      goto fail;

   ralloc_free(fill_cs);
   ralloc_free(copy_cs);
   return VK_SUCCESS;
fail:
   radv_device_finish_meta_buffer_state(device);
   ralloc_free(fill_cs);
   ralloc_free(copy_cs);
   return result;
}

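/* Tears down everything created by radv_device_init_meta_buffer_state();
 * also used as the cleanup path when initialization fails partway through.
 */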
void
radv_device_finish_meta_buffer_state(struct radv_device *device)
{
   struct radv_meta_state *state = &device->meta_state;

   radv_DestroyPipeline(radv_device_to_handle(device), state->buffer.copy_pipeline, &state->alloc);
   radv_DestroyPipeline(radv_device_to_handle(device), state->buffer.fill_pipeline, &state->alloc);
   radv_DestroyPipelineLayout(radv_device_to_handle(device), state->buffer.copy_p_layout,
                              &state->alloc);
   radv_DestroyPipelineLayout(radv_device_to_handle(device), state->buffer.fill_p_layout,
                              &state->alloc);
   radv_DestroyDescriptorSetLayout(radv_device_to_handle(device), state->buffer.copy_ds_layout,
                                   &state->alloc);
   radv_DestroyDescriptorSetLayout(radv_device_to_handle(device), state->buffer.fill_ds_layout,
                                   &state->alloc);
}

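/* Records a dispatch of the fill pipeline over the given BO range. Each
 * workgroup writes 1024 bytes (64 invocations x 16 bytes), hence the
 * round-up to a 1024-byte block count.
 */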
static void
fill_buffer_shader(struct radv_cmd_buffer *cmd_buffer, struct radeon_winsys_bo *bo, uint64_t offset,
                   uint64_t size, uint32_t value)
{
   struct radv_device *device = cmd_buffer->device;
   uint64_t block_count = round_up_u64(size, 1024);
   struct radv_meta_saved_state saved_state;
   struct radv_buffer dst_buffer;

   radv_meta_save(
      &saved_state, cmd_buffer,
      RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_CONSTANTS | RADV_META_SAVE_DESCRIPTORS);

   radv_buffer_init(&dst_buffer, cmd_buffer->device, bo, size, offset);

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
                        device->meta_state.buffer.fill_pipeline);

   radv_meta_push_descriptor_set(
      cmd_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, device->meta_state.buffer.fill_p_layout,
      0, /* set */
      1, /* descriptorWriteCount */
      (VkWriteDescriptorSet[]){
         {.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
          .dstBinding = 0,
          .dstArrayElement = 0,
          .descriptorCount = 1,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
          .pBufferInfo = &(VkDescriptorBufferInfo){.buffer = radv_buffer_to_handle(&dst_buffer),
                                                   .offset = 0,
                                                   .range = size}}});

   radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
                         device->meta_state.buffer.fill_p_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, 4,
                         &value);

   radv_CmdDispatch(radv_cmd_buffer_to_handle(cmd_buffer), block_count, 1, 1);

   radv_buffer_finish(&dst_buffer);

   radv_meta_restore(&saved_state, cmd_buffer);
}

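/* Records a dispatch of the copy pipeline; as with the fill path, each
 * workgroup moves 1024 bytes, with dst and src pushed as storage buffer
 * descriptors at bindings 0 and 1.
 */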
static void
copy_buffer_shader(struct radv_cmd_buffer *cmd_buffer, struct radeon_winsys_bo *src_bo,
                   struct radeon_winsys_bo *dst_bo, uint64_t src_offset, uint64_t dst_offset,
                   uint64_t size)
{
   struct radv_device *device = cmd_buffer->device;
   uint64_t block_count = round_up_u64(size, 1024);
   struct radv_meta_saved_state saved_state;
   struct radv_buffer src_buffer, dst_buffer;

   radv_meta_save(&saved_state, cmd_buffer,
                  RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_DESCRIPTORS);

   radv_buffer_init(&src_buffer, cmd_buffer->device, src_bo, size, src_offset);
   radv_buffer_init(&dst_buffer, cmd_buffer->device, dst_bo, size, dst_offset);

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
                        device->meta_state.buffer.copy_pipeline);

   radv_meta_push_descriptor_set(
      cmd_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, device->meta_state.buffer.copy_p_layout,
      0, /* set */
      2, /* descriptorWriteCount */
      (VkWriteDescriptorSet[]){
         {.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
          .dstBinding = 0,
          .dstArrayElement = 0,
          .descriptorCount = 1,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
          .pBufferInfo = &(VkDescriptorBufferInfo){.buffer = radv_buffer_to_handle(&dst_buffer),
                                                   .offset = 0,
                                                   .range = size}},
         {.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
          .dstBinding = 1,
          .dstArrayElement = 0,
          .descriptorCount = 1,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
          .pBufferInfo = &(VkDescriptorBufferInfo){.buffer = radv_buffer_to_handle(&src_buffer),
                                                   .offset = 0,
                                                   .range = size}}});

   radv_CmdDispatch(radv_cmd_buffer_to_handle(cmd_buffer), block_count, 1, 1);

   radv_buffer_finish(&src_buffer);
   radv_buffer_finish(&dst_buffer);

   radv_meta_restore(&saved_state, cmd_buffer);
}

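/* Heuristic for choosing compute over CP DMA: compute wins for transfers at
 * or above RADV_BUFFER_OPS_CS_THRESHOLD, except on GFX10+ dGPUs when either
 * buffer lives outside VRAM, where CP DMA avoids a slow PCIe round trip.
 */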
static bool
radv_prefer_compute_dma(const struct radv_device *device, uint64_t size,
                        struct radeon_winsys_bo *src_bo, struct radeon_winsys_bo *dst_bo)
{
   bool use_compute = size >= RADV_BUFFER_OPS_CS_THRESHOLD;

   if (device->physical_device->rad_info.chip_class >= GFX10 &&
       device->physical_device->rad_info.has_dedicated_vram) {
      if ((src_bo && !(src_bo->initial_domain & RADEON_DOMAIN_VRAM)) ||
          !(dst_bo->initial_domain & RADEON_DOMAIN_VRAM)) {
         /* Prefer CP DMA for GTT on dGPUs due to slow PCIe. */
         use_compute = false;
      }
   }

   return use_compute;
}

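/* Fills a dword-aligned range either with the compute shader, returning the
 * flush bits the caller must apply before the writes can be consumed, or
 * with a CP DMA clear, which needs no extra flush bits here.
 */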
uint32_t
radv_fill_buffer(struct radv_cmd_buffer *cmd_buffer, const struct radv_image *image,
                 struct radeon_winsys_bo *bo, uint64_t offset, uint64_t size, uint32_t value)
{
   bool use_compute = radv_prefer_compute_dma(cmd_buffer->device, size, NULL, bo);
   uint32_t flush_bits = 0;

   assert(!(offset & 3));
   assert(!(size & 3));

   if (use_compute) {
      cmd_buffer->state.flush_bits |=
         radv_dst_access_flush(cmd_buffer, VK_ACCESS_SHADER_WRITE_BIT, image);

      fill_buffer_shader(cmd_buffer, bo, offset, size, value);

      flush_bits = RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_INV_VCACHE |
                   radv_src_access_flush(cmd_buffer, VK_ACCESS_SHADER_WRITE_BIT, image);
   } else if (size) {
      uint64_t va = radv_buffer_get_va(bo);
      va += offset;
      radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, bo);
      si_cp_dma_clear_buffer(cmd_buffer, va, size, value);
   }

   return flush_bits;
}

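/* Copies between two BOs, taking the compute path only when the size and
 * both offsets are dword-aligned and the heuristic favors it, and falling
 * back to CP DMA otherwise.
 */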
static void
radv_copy_buffer(struct radv_cmd_buffer *cmd_buffer, struct radeon_winsys_bo *src_bo,
                 struct radeon_winsys_bo *dst_bo, uint64_t src_offset, uint64_t dst_offset,
                 uint64_t size)
{
   bool use_compute = !(size & 3) && !(src_offset & 3) && !(dst_offset & 3) &&
                      radv_prefer_compute_dma(cmd_buffer->device, size, src_bo, dst_bo);

   if (use_compute)
      copy_buffer_shader(cmd_buffer, src_bo, dst_bo, src_offset, dst_offset, size);
   else if (size) {
      uint64_t src_va = radv_buffer_get_va(src_bo);
      uint64_t dst_va = radv_buffer_get_va(dst_bo);
      src_va += src_offset;
      dst_va += dst_offset;

      radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, src_bo);
      radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_bo);

      si_cp_dma_buffer_copy(cmd_buffer, src_va, dst_va, size);
   }
}

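/* vkCmdFillBuffer entry point. VK_WHOLE_SIZE is clamped down to the nearest
 * smaller multiple of four bytes, as the spec requires.
 */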
void
radv_CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                   VkDeviceSize fillSize, uint32_t data)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);

   if (fillSize == VK_WHOLE_SIZE)
      fillSize = (dst_buffer->size - dstOffset) & ~3ull;

   radv_fill_buffer(cmd_buffer, NULL, dst_buffer->bo, dst_buffer->offset + dstOffset, fillSize,
                    data);
}

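/* Copies a single VkBufferCopy2KHR region with conditional rendering
 * temporarily disabled.
 */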
static void
copy_buffer(struct radv_cmd_buffer *cmd_buffer, struct radv_buffer *src_buffer,
            struct radv_buffer *dst_buffer, const VkBufferCopy2KHR *region)
{
   bool old_predicating;

   /* VK_EXT_conditional_rendering says that copy commands should not be
    * affected by conditional rendering.
    */
   old_predicating = cmd_buffer->state.predicating;
   cmd_buffer->state.predicating = false;

   radv_copy_buffer(cmd_buffer, src_buffer->bo, dst_buffer->bo,
                    src_buffer->offset + region->srcOffset, dst_buffer->offset + region->dstOffset,
                    region->size);

   /* Restore conditional rendering. */
   cmd_buffer->state.predicating = old_predicating;
}

void
radv_CmdCopyBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfo)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_buffer, src_buffer, pCopyBufferInfo->srcBuffer);
   RADV_FROM_HANDLE(radv_buffer, dst_buffer, pCopyBufferInfo->dstBuffer);

   for (unsigned r = 0; r < pCopyBufferInfo->regionCount; r++) {
      copy_buffer(cmd_buffer, src_buffer, dst_buffer, &pCopyBufferInfo->pRegions[r]);
   }
}

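/* Writes a small amount of data inline through the CP with PKT3_WRITE_DATA:
 * four header dwords (reserved as "words + 4" below) followed by the
 * payload, after emitting any pending cache flushes.
 */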
void
radv_update_buffer_cp(struct radv_cmd_buffer *cmd_buffer, uint64_t va, const void *data,
                      uint64_t size)
{
   uint64_t words = size / 4;
   bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);

   assert(size < RADV_BUFFER_UPDATE_THRESHOLD);

   si_emit_cache_flush(cmd_buffer);
   radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, words + 4);

   radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + words, 0));
   radeon_emit(cmd_buffer->cs, S_370_DST_SEL(mec ? V_370_MEM : V_370_MEM_GRBM) |
                                  S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_ME));
   radeon_emit(cmd_buffer->cs, va);
   radeon_emit(cmd_buffer->cs, va >> 32);
   radeon_emit_array(cmd_buffer->cs, data, words);

   if (unlikely(cmd_buffer->device->trace_bo))
      radv_cmd_buffer_trace_emit(cmd_buffer);
}

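/* vkCmdUpdateBuffer entry point. Updates below RADV_BUFFER_UPDATE_THRESHOLD
 * are written inline through the CP; larger ones are staged in the upload BO
 * and copied with radv_copy_buffer().
 */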
void
radv_CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                     VkDeviceSize dataSize, const void *pData)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
   uint64_t va = radv_buffer_get_va(dst_buffer->bo);
   va += dstOffset + dst_buffer->offset;

   assert(!(dataSize & 3));
   assert(!(va & 3));

   if (!dataSize)
      return;

   if (dataSize < RADV_BUFFER_UPDATE_THRESHOLD) {
      radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);
      radv_update_buffer_cp(cmd_buffer, va, pData, dataSize);
   } else {
      uint32_t buf_offset;
      radv_cmd_buffer_upload_data(cmd_buffer, dataSize, pData, &buf_offset);
      radv_copy_buffer(cmd_buffer, cmd_buffer->upload.upload_bo, dst_buffer->bo, buf_offset,
                       dstOffset + dst_buffer->offset, dataSize);
   }
}