#include "nir/nir_builder.h"
#include "radv_cp_dma.h"
#include "radv_debug.h"
#include "radv_meta.h"
#include "radv_sdma.h"

#include "radv_cs.h"
#include "sid.h"
#include "vk_common_entrypoints.h"

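/* Builds a compute shader that fills a buffer with a 32-bit value. Each
 * invocation broadcasts the value to a uvec4 and stores 16 bytes; the store
 * offset is clamped to max_offset (size - 16), so when the size is not a
 * multiple of 16 the trailing stores overlap the buffer tail instead of
 * writing past the end.
 */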
static nir_shader *
build_buffer_fill_shader(struct radv_device *dev)
{
   nir_builder b = radv_meta_init_shader(dev, MESA_SHADER_COMPUTE, "meta_buffer_fill");
   b.shader->info.workgroup_size[0] = 64;

   nir_def *pconst = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
   nir_def *buffer_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b0011));
   nir_def *max_offset = nir_channel(&b, pconst, 2);
   nir_def *data = nir_swizzle(&b, nir_channel(&b, pconst, 3), (unsigned[]){0, 0, 0, 0}, 4);

   nir_def *global_id =
      nir_iadd(&b, nir_imul_imm(&b, nir_channel(&b, nir_load_workgroup_id(&b), 0), b.shader->info.workgroup_size[0]),
               nir_load_local_invocation_index(&b));

   nir_def *offset = nir_imin(&b, nir_imul_imm(&b, global_id, 16), max_offset);
   nir_def *dst_addr = nir_iadd(&b, buffer_addr, nir_u2u64(&b, offset));
   nir_build_store_global(&b, data, dst_addr, .align_mul = 4);

   return b.shader;
}

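/* Push constants for the fill shader; the layout must match the 16-byte
 * block loaded by build_buffer_fill_shader().
 */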
struct fill_constants {
   uint64_t addr;
   uint32_t max_offset;
   uint32_t data;
};

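/* Lazily creates the fill pipeline and its layout through the shared
 * vk_meta cache, keyed on the "radv-fill-buffer" string, so the pipeline is
 * built once per device and reused by later fills.
 */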
static VkResult
get_fill_pipeline(struct radv_device *device, VkPipeline *pipeline_out, VkPipelineLayout *layout_out)
{
   const char *key_data = "radv-fill-buffer";
   VkResult result;

   const VkPushConstantRange pc_range = {
      .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
      .size = sizeof(struct fill_constants),
   };

   result = vk_meta_get_pipeline_layout(&device->vk, &device->meta_state.device, NULL, &pc_range, key_data,
                                        strlen(key_data), layout_out);
   if (result != VK_SUCCESS)
      return result;

   VkPipeline pipeline_from_cache = vk_meta_lookup_pipeline(&device->meta_state.device, key_data, strlen(key_data));
   if (pipeline_from_cache != VK_NULL_HANDLE) {
      *pipeline_out = pipeline_from_cache;
      return VK_SUCCESS;
   }

   nir_shader *cs = build_buffer_fill_shader(device);

   const VkPipelineShaderStageCreateInfo stage_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   const VkComputePipelineCreateInfo pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = stage_info,
      .flags = 0,
      .layout = *layout_out,
   };

   result = vk_meta_create_compute_pipeline(&device->vk, &device->meta_state.device, &pipeline_info, key_data,
                                            strlen(key_data), pipeline_out);

   ralloc_free(cs);
   return result;
}

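/* Builds a compute shader that copies between two buffers, 16 bytes per
 * invocation. As in the fill shader, the offset is clamped to max_offset so
 * the tail of a size that is not a multiple of 16 is re-copied rather than
 * overrun.
 */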
static nir_shader *
build_buffer_copy_shader(struct radv_device *dev)
{
   nir_builder b = radv_meta_init_shader(dev, MESA_SHADER_COMPUTE, "meta_buffer_copy");
   b.shader->info.workgroup_size[0] = 64;

   nir_def *pconst = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
   nir_def *max_offset = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 16, .range = 4);
   nir_def *src_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b0011));
   nir_def *dst_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b1100));

   nir_def *global_id =
      nir_iadd(&b, nir_imul_imm(&b, nir_channel(&b, nir_load_workgroup_id(&b), 0), b.shader->info.workgroup_size[0]),
               nir_load_local_invocation_index(&b));

   nir_def *offset = nir_u2u64(&b, nir_imin(&b, nir_imul_imm(&b, global_id, 16), max_offset));

   nir_def *data = nir_build_load_global(&b, 4, 32, nir_iadd(&b, src_addr, offset), .align_mul = 4);
   nir_build_store_global(&b, data, nir_iadd(&b, dst_addr, offset), .align_mul = 4);

   return b.shader;
}

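/* Push constants for the copy shader; must match the 20 bytes of push
 * constants read by build_buffer_copy_shader() (two 64-bit addresses
 * followed by max_offset).
 */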
struct copy_constants {
   uint64_t src_addr;
   uint64_t dst_addr;
   uint32_t max_offset;
};

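/* Same lazy create-and-cache scheme as get_fill_pipeline(), keyed on
 * "radv-copy-buffer".
 */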
static VkResult
get_copy_pipeline(struct radv_device *device, VkPipeline *pipeline_out, VkPipelineLayout *layout_out)
{
   const char *key_data = "radv-copy-buffer";
   VkResult result;

   const VkPushConstantRange pc_range = {
      .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
      .size = sizeof(struct copy_constants),
   };

   result = vk_meta_get_pipeline_layout(&device->vk, &device->meta_state.device, NULL, &pc_range, key_data,
                                        strlen(key_data), layout_out);
   if (result != VK_SUCCESS)
      return result;

   VkPipeline pipeline_from_cache = vk_meta_lookup_pipeline(&device->meta_state.device, key_data, strlen(key_data));
   if (pipeline_from_cache != VK_NULL_HANDLE) {
      *pipeline_out = pipeline_from_cache;
      return VK_SUCCESS;
   }

   nir_shader *cs = build_buffer_copy_shader(device);

   const VkPipelineShaderStageCreateInfo stage_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   const VkComputePipelineCreateInfo pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = stage_info,
      .flags = 0,
      .layout = *layout_out,
   };

   result = vk_meta_create_compute_pipeline(&device->vk, &device->meta_state.device, &pipeline_info, key_data,
                                            strlen(key_data), pipeline_out);

   ralloc_free(cs);
   return result;
}

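/* Compute fill path: binds the cached fill pipeline, pushes the
 * address/max_offset/value constants and dispatches one invocation per 16
 * bytes, saving and restoring the interrupted compute pipeline and push
 * constants around the meta operation.
 */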
static void
fill_buffer_shader(struct radv_cmd_buffer *cmd_buffer, uint64_t va, uint64_t size, uint32_t data)
{
   struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
   struct radv_meta_saved_state saved_state;
   VkPipelineLayout layout;
   VkPipeline pipeline;
   VkResult result;

   result = get_fill_pipeline(device, &pipeline, &layout);
   if (result != VK_SUCCESS) {
      vk_command_buffer_set_error(&cmd_buffer->vk, result);
      return;
   }

   radv_meta_save(&saved_state, cmd_buffer, RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_CONSTANTS);

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);

   assert(size >= 16 && size <= UINT32_MAX);

   struct fill_constants fill_consts = {
      .addr = va,
      .max_offset = size - 16,
      .data = data,
   };

   vk_common_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer), layout, VK_SHADER_STAGE_COMPUTE_BIT, 0,
                              sizeof(fill_consts), &fill_consts);

   radv_unaligned_dispatch(cmd_buffer, DIV_ROUND_UP(size, 16), 1, 1);

   radv_meta_restore(&saved_state, cmd_buffer);
}

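/* Compute copy path, mirroring fill_buffer_shader(): one invocation copies
 * 16 bytes.
 */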
static void
copy_buffer_shader(struct radv_cmd_buffer *cmd_buffer, uint64_t src_va, uint64_t dst_va, uint64_t size)
{
   struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
   struct radv_meta_saved_state saved_state;
   VkPipelineLayout layout;
   VkPipeline pipeline;
   VkResult result;

   result = get_copy_pipeline(device, &pipeline, &layout);
   if (result != VK_SUCCESS) {
      vk_command_buffer_set_error(&cmd_buffer->vk, result);
      return;
   }

   radv_meta_save(&saved_state, cmd_buffer, RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_CONSTANTS);

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);

   assert(size >= 16 && size <= UINT32_MAX);

   struct copy_constants copy_consts = {
      .src_addr = src_va,
      .dst_addr = dst_va,
      .max_offset = size - 16,
   };

   vk_common_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer), layout, VK_SHADER_STAGE_COMPUTE_BIT, 0,
                              sizeof(copy_consts), &copy_consts);

   radv_unaligned_dispatch(cmd_buffer, DIV_ROUND_UP(size, 16), 1, 1);

   radv_meta_restore(&saved_state, cmd_buffer);
}

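/* Chooses between the compute shader and CP DMA: compute is preferred for
 * transfers of at least RADV_BUFFER_OPS_CS_THRESHOLD bytes, except on GFX10+
 * dGPUs when either BO may live outside VRAM, where CP DMA avoids slow
 * shader access over PCIe.
 */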
static bool
radv_prefer_compute_dma(const struct radv_device *device, uint64_t size, struct radeon_winsys_bo *src_bo,
                        struct radeon_winsys_bo *dst_bo)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   bool use_compute = size >= RADV_BUFFER_OPS_CS_THRESHOLD;

   if (pdev->info.gfx_level >= GFX10 && pdev->info.has_dedicated_vram) {
      if ((src_bo && !(src_bo->initial_domain & RADEON_DOMAIN_VRAM)) ||
          (dst_bo && !(dst_bo->initial_domain & RADEON_DOMAIN_VRAM))) {
         /* Prefer CP DMA for GTT on dGPUs due to slow PCIe. */
         use_compute = false;
      }
   }

   return use_compute;
}

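/* Fills size bytes at va with a 32-bit value, picking SDMA on transfer
 * queues, the compute shader when radv_prefer_compute_dma() says so, and CP
 * DMA otherwise. Returns flush bits for the caller to emit before the
 * written data is consumed; only the compute path sets any.
 */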
uint32_t
radv_fill_buffer(struct radv_cmd_buffer *cmd_buffer, const struct radv_image *image, struct radeon_winsys_bo *bo,
                 uint64_t va, uint64_t size, uint32_t value)
{
   struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
   bool use_compute = radv_prefer_compute_dma(device, size, NULL, bo);
   uint32_t flush_bits = 0;

   assert(!(va & 3));
   assert(!(size & 3));

   if (bo)
      radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo);

   if (cmd_buffer->qf == RADV_QUEUE_TRANSFER) {
      radv_sdma_fill_buffer(device, cmd_buffer->cs, va, size, value);
   } else if (use_compute) {
      fill_buffer_shader(cmd_buffer, va, size, value);

      flush_bits = RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_INV_VCACHE |
                   radv_src_access_flush(cmd_buffer, VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,
                                         VK_ACCESS_2_SHADER_WRITE_BIT, image, NULL);
   } else if (size)
      radv_cp_dma_clear_buffer(cmd_buffer, va, size, value);

   return flush_bits;
}

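/* Copies size bytes between two BOs with the same SDMA/compute/CP DMA
 * selection as radv_fill_buffer(); the compute path additionally requires
 * dword-aligned size and offsets.
 */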
void
radv_copy_buffer(struct radv_cmd_buffer *cmd_buffer, struct radeon_winsys_bo *src_bo, struct radeon_winsys_bo *dst_bo,
                 uint64_t src_offset, uint64_t dst_offset, uint64_t size)
{
   struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
   bool use_compute =
      !(size & 3) && !(src_offset & 3) && !(dst_offset & 3) && radv_prefer_compute_dma(device, size, src_bo, dst_bo);

   uint64_t src_va = radv_buffer_get_va(src_bo) + src_offset;
   uint64_t dst_va = radv_buffer_get_va(dst_bo) + dst_offset;

   radv_cs_add_buffer(device->ws, cmd_buffer->cs, src_bo);
   radv_cs_add_buffer(device->ws, cmd_buffer->cs, dst_bo);

   if (cmd_buffer->qf == RADV_QUEUE_TRANSFER)
      radv_sdma_copy_buffer(device, cmd_buffer->cs, src_va, dst_va, size);
   else if (use_compute)
      copy_buffer_shader(cmd_buffer, src_va, dst_va, size);
   else if (size)
      radv_cp_dma_buffer_copy(cmd_buffer, src_va, dst_va, size);
}

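/* vkCmdFillBuffer entrypoint: vk_buffer_range() resolves VK_WHOLE_SIZE
 * against the buffer size, and masking with ~3 rounds the fill size down to
 * a multiple of 4 as the spec requires.
 */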
VKAPI_ATTR void VKAPI_CALL
radv_CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize fillSize,
                   uint32_t data)
{
   VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   VK_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);

   fillSize = vk_buffer_range(&dst_buffer->vk, dstOffset, fillSize) & ~3ull;

   radv_fill_buffer(cmd_buffer, NULL, dst_buffer->bo,
                    radv_buffer_get_va(dst_buffer->bo) + dst_buffer->offset + dstOffset, fillSize, data);
}

static void
copy_buffer(struct radv_cmd_buffer *cmd_buffer, struct radv_buffer *src_buffer, struct radv_buffer *dst_buffer,
            const VkBufferCopy2 *region)
{
   bool old_predicating;

   /* VK_EXT_conditional_rendering says that copy commands should not be
    * affected by conditional rendering.
    */
   old_predicating = cmd_buffer->state.predicating;
   cmd_buffer->state.predicating = false;

   radv_copy_buffer(cmd_buffer, src_buffer->bo, dst_buffer->bo, src_buffer->offset + region->srcOffset,
                    dst_buffer->offset + region->dstOffset, region->size);

   /* Restore conditional rendering. */
   cmd_buffer->state.predicating = old_predicating;
}

VKAPI_ATTR void VKAPI_CALL
radv_CmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCopyBufferInfo)
{
   VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   VK_FROM_HANDLE(radv_buffer, src_buffer, pCopyBufferInfo->srcBuffer);
   VK_FROM_HANDLE(radv_buffer, dst_buffer, pCopyBufferInfo->dstBuffer);

   for (unsigned r = 0; r < pCopyBufferInfo->regionCount; r++) {
      copy_buffer(cmd_buffer, src_buffer, dst_buffer, &pCopyBufferInfo->pRegions[r]);
   }
}

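/* Writes a small update inline into the command stream with a
 * PKT3_WRITE_DATA packet, so no staging buffer is needed; the data becomes
 * part of the IB, which is why this is limited to sizes below
 * RADV_BUFFER_UPDATE_THRESHOLD.
 */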
void
radv_update_buffer_cp(struct radv_cmd_buffer *cmd_buffer, uint64_t va, const void *data, uint64_t size)
{
   struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
   uint64_t words = size / 4;
   bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);

   assert(size < RADV_BUFFER_UPDATE_THRESHOLD);

   radv_emit_cache_flush(cmd_buffer);
   radeon_check_space(device->ws, cmd_buffer->cs, words + 4);

   radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + words, 0));
   radeon_emit(cmd_buffer->cs,
               S_370_DST_SEL(mec ? V_370_MEM : V_370_MEM_GRBM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_ME));
   radeon_emit(cmd_buffer->cs, va);
   radeon_emit(cmd_buffer->cs, va >> 32);
   radeon_emit_array(cmd_buffer->cs, data, words);

   if (radv_device_fault_detection_enabled(device))
      radv_cmd_buffer_trace_emit(cmd_buffer);
}

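/* vkCmdUpdateBuffer entrypoint: small updates on non-transfer queues go
 * inline through radv_update_buffer_cp(); everything else is staged in the
 * upload BO and copied with radv_copy_buffer().
 */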
VKAPI_ATTR void VKAPI_CALL
radv_CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize,
                     const void *pData)
{
   VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   VK_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
   struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
   uint64_t va = radv_buffer_get_va(dst_buffer->bo);
   va += dstOffset + dst_buffer->offset;

   assert(!(dataSize & 3));
   assert(!(va & 3));

   if (!dataSize)
      return;

   if (dataSize < RADV_BUFFER_UPDATE_THRESHOLD && cmd_buffer->qf != RADV_QUEUE_TRANSFER) {
      radv_cs_add_buffer(device->ws, cmd_buffer->cs, dst_buffer->bo);
      radv_update_buffer_cp(cmd_buffer, va, pData, dataSize);
   } else {
      uint32_t buf_offset;
      radv_cmd_buffer_upload_data(cmd_buffer, dataSize, pData, &buf_offset);
      radv_copy_buffer(cmd_buffer, cmd_buffer->upload.upload_bo, dst_buffer->bo, buf_offset,
                       dstOffset + dst_buffer->offset, dataSize);
   }
}