#include "nir/nir_builder.h"
#include "radv_meta.h"
#include "radv_sdma.h"

#include "radv_cs.h"
#include "sid.h"
#include "vk_common_entrypoints.h"

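/* Compute shader that fills a buffer with a repeated 32-bit value. Each
 * invocation broadcasts the dword to a vec4 and stores 16 bytes; the store
 * offset is clamped to max_offset so the trailing invocations rewrite the
 * last 16 bytes instead of writing past the end of the buffer.
 */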
static nir_shader *
build_buffer_fill_shader(struct radv_device *dev)
{
   nir_builder b = radv_meta_init_shader(dev, MESA_SHADER_COMPUTE, "meta_buffer_fill");
   b.shader->info.workgroup_size[0] = 64;

   nir_def *pconst = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
   nir_def *buffer_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b0011));
   nir_def *max_offset = nir_channel(&b, pconst, 2);
   nir_def *data = nir_swizzle(&b, nir_channel(&b, pconst, 3), (unsigned[]){0, 0, 0, 0}, 4);

   nir_def *global_id =
      nir_iadd(&b, nir_imul_imm(&b, nir_channel(&b, nir_load_workgroup_id(&b), 0), b.shader->info.workgroup_size[0]),
               nir_load_local_invocation_index(&b));

   nir_def *offset = nir_imin(&b, nir_imul_imm(&b, global_id, 16), max_offset);
   nir_def *dst_addr = nir_iadd(&b, buffer_addr, nir_u2u64(&b, offset));
   nir_build_store_global(&b, data, dst_addr, .align_mul = 4);

   return b.shader;
}

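/* Compute shader that copies between two buffers, 16 bytes per invocation.
 * The source and destination addresses come in the first 16 bytes of push
 * constants, with max_offset in the following dword; the offset is clamped
 * the same way as in the fill shader so stores stay inside the copy size.
 */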
static nir_shader *
build_buffer_copy_shader(struct radv_device *dev)
{
   nir_builder b = radv_meta_init_shader(dev, MESA_SHADER_COMPUTE, "meta_buffer_copy");
   b.shader->info.workgroup_size[0] = 64;

   nir_def *pconst = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
   nir_def *max_offset = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 16, .range = 4);
   nir_def *src_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b0011));
   nir_def *dst_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b1100));

   nir_def *global_id =
      nir_iadd(&b, nir_imul_imm(&b, nir_channel(&b, nir_load_workgroup_id(&b), 0), b.shader->info.workgroup_size[0]),
               nir_load_local_invocation_index(&b));

   nir_def *offset = nir_u2u64(&b, nir_imin(&b, nir_imul_imm(&b, global_id, 16), max_offset));

   nir_def *data = nir_build_load_global(&b, 4, 32, nir_iadd(&b, src_addr, offset), .align_mul = 4);
   nir_build_store_global(&b, data, nir_iadd(&b, dst_addr, offset), .align_mul = 4);

   return b.shader;
}

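/* Push constant layouts matching what the shaders above load: fill takes the
 * buffer address, max_offset and the fill dword; copy takes both addresses
 * plus max_offset at byte offset 16.
 */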
struct fill_constants {
   uint64_t addr;
   uint32_t max_offset;
   uint32_t data;
};

struct copy_constants {
   uint64_t src_addr;
   uint64_t dst_addr;
   uint32_t max_offset;
};

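/* Create the pipeline layouts (push constants only, no descriptor sets) and
 * the fill/copy compute pipelines used by the buffer meta operations.
 */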
VkResult
radv_device_init_meta_buffer_state(struct radv_device *device)
{
   VkResult result;
   nir_shader *fill_cs = build_buffer_fill_shader(device);
   nir_shader *copy_cs = build_buffer_copy_shader(device);

   VkPipelineLayoutCreateInfo fill_pl_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
      .setLayoutCount = 0,
      .pushConstantRangeCount = 1,
      .pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(struct fill_constants)},
   };

   result = radv_CreatePipelineLayout(radv_device_to_handle(device), &fill_pl_create_info, &device->meta_state.alloc,
                                      &device->meta_state.buffer.fill_p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineLayoutCreateInfo copy_pl_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
      .setLayoutCount = 0,
      .pushConstantRangeCount = 1,
      .pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(struct copy_constants)},
   };

   result = radv_CreatePipelineLayout(radv_device_to_handle(device), &copy_pl_create_info, &device->meta_state.alloc,
                                      &device->meta_state.buffer.copy_p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineShaderStageCreateInfo fill_pipeline_shader_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(fill_cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   VkComputePipelineCreateInfo fill_vk_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = fill_pipeline_shader_stage,
      .flags = 0,
      .layout = device->meta_state.buffer.fill_p_layout,
   };

   result = radv_compute_pipeline_create(radv_device_to_handle(device), device->meta_state.cache,
                                         &fill_vk_pipeline_info, NULL, &device->meta_state.buffer.fill_pipeline);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineShaderStageCreateInfo copy_pipeline_shader_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(copy_cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   VkComputePipelineCreateInfo copy_vk_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = copy_pipeline_shader_stage,
      .flags = 0,
      .layout = device->meta_state.buffer.copy_p_layout,
   };

   result = radv_compute_pipeline_create(radv_device_to_handle(device), device->meta_state.cache,
                                         &copy_vk_pipeline_info, NULL, &device->meta_state.buffer.copy_pipeline);
   if (result != VK_SUCCESS)
      goto fail;

   ralloc_free(fill_cs);
   ralloc_free(copy_cs);
   return VK_SUCCESS;
fail:
   ralloc_free(fill_cs);
   ralloc_free(copy_cs);
   return result;
}

void
radv_device_finish_meta_buffer_state(struct radv_device *device)
{
   struct radv_meta_state *state = &device->meta_state;

   radv_DestroyPipeline(radv_device_to_handle(device), state->buffer.copy_pipeline, &state->alloc);
   radv_DestroyPipeline(radv_device_to_handle(device), state->buffer.fill_pipeline, &state->alloc);
   radv_DestroyPipelineLayout(radv_device_to_handle(device), state->buffer.copy_p_layout, &state->alloc);
   radv_DestroyPipelineLayout(radv_device_to_handle(device), state->buffer.fill_p_layout, &state->alloc);
}

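/* Fill a buffer on the compute path: save the current compute state, bind the
 * fill pipeline, push the constants and dispatch one invocation per 16 bytes
 * (the shader clamps the tail), then restore the saved state.
 */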
static void
fill_buffer_shader(struct radv_cmd_buffer *cmd_buffer, uint64_t va, uint64_t size, uint32_t data)
{
   struct radv_device *device = cmd_buffer->device;
   struct radv_meta_saved_state saved_state;

   radv_meta_save(&saved_state, cmd_buffer, RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_CONSTANTS);

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
                        device->meta_state.buffer.fill_pipeline);

   assert(size >= 16 && size <= UINT32_MAX);

   struct fill_constants fill_consts = {
      .addr = va,
      .max_offset = size - 16,
      .data = data,
   };

   vk_common_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer), device->meta_state.buffer.fill_p_layout,
                              VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(fill_consts), &fill_consts);

   radv_unaligned_dispatch(cmd_buffer, DIV_ROUND_UP(size, 16), 1, 1);

   radv_meta_restore(&saved_state, cmd_buffer);
}

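/* Same idea as fill_buffer_shader, but binding the copy pipeline and pushing
 * the source and destination addresses instead of a fill value.
 */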
static void
copy_buffer_shader(struct radv_cmd_buffer *cmd_buffer, uint64_t src_va, uint64_t dst_va, uint64_t size)
{
   struct radv_device *device = cmd_buffer->device;
   struct radv_meta_saved_state saved_state;

   radv_meta_save(&saved_state, cmd_buffer, RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_CONSTANTS);

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
                        device->meta_state.buffer.copy_pipeline);

   assert(size >= 16 && size <= UINT32_MAX);

   struct copy_constants copy_consts = {
      .src_addr = src_va,
      .dst_addr = dst_va,
      .max_offset = size - 16,
   };

   vk_common_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer), device->meta_state.buffer.copy_p_layout,
                              VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(copy_consts), &copy_consts);

   radv_unaligned_dispatch(cmd_buffer, DIV_ROUND_UP(size, 16), 1, 1);

   radv_meta_restore(&saved_state, cmd_buffer);
}

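/* Decide between the compute path and CP DMA: compute is preferred for large
 * transfers (>= RADV_BUFFER_OPS_CS_THRESHOLD), except when one of the buffers
 * lives in GTT on a GFX10+ GPU with dedicated VRAM, where CP DMA is expected
 * to behave better over PCIe.
 */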
static bool
radv_prefer_compute_dma(const struct radv_device *device, uint64_t size, struct radeon_winsys_bo *src_bo,
                        struct radeon_winsys_bo *dst_bo)
{
   bool use_compute = size >= RADV_BUFFER_OPS_CS_THRESHOLD;

   if (device->physical_device->rad_info.gfx_level >= GFX10 && device->physical_device->rad_info.has_dedicated_vram) {
      if ((src_bo && !(src_bo->initial_domain & RADEON_DOMAIN_VRAM)) ||
          (dst_bo && !(dst_bo->initial_domain & RADEON_DOMAIN_VRAM))) {
         /* Prefer CP DMA for GTT on dGPUs due to slow PCIe. */
         use_compute = false;
      }
   }

   return use_compute;
}

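/* Fill 'size' bytes at 'va' with 'value'. Fills on the transfer queue go
 * through SDMA; when the compute path is preferred the fill shader is
 * dispatched and the returned flush bits must be applied by the caller before
 * the result is consumed; otherwise CP DMA is used.
 */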
uint32_t
radv_fill_buffer(struct radv_cmd_buffer *cmd_buffer, const struct radv_image *image, struct radeon_winsys_bo *bo,
                 uint64_t va, uint64_t size, uint32_t value)
{
   bool use_compute = radv_prefer_compute_dma(cmd_buffer->device, size, NULL, bo);
   uint32_t flush_bits = 0;

   assert(!(va & 3));
   assert(!(size & 3));

   if (bo)
      radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, bo);

   if (cmd_buffer->qf == RADV_QUEUE_TRANSFER) {
      radv_sdma_fill_buffer(cmd_buffer->device, cmd_buffer->cs, va, size, value);
   } else if (use_compute) {
      cmd_buffer->state.flush_bits |= radv_dst_access_flush(cmd_buffer, VK_ACCESS_2_SHADER_WRITE_BIT, image);

      fill_buffer_shader(cmd_buffer, va, size, value);

      flush_bits = RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_INV_VCACHE |
                   radv_src_access_flush(cmd_buffer, VK_ACCESS_2_SHADER_WRITE_BIT, image);
   } else if (size)
      radv_cp_dma_clear_buffer(cmd_buffer, va, size, value);

   return flush_bits;
}

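/* Copy between two BOs, picking SDMA on transfer queues, the compute shader
 * for large dword-aligned copies, and CP DMA otherwise.
 */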
void
radv_copy_buffer(struct radv_cmd_buffer *cmd_buffer, struct radeon_winsys_bo *src_bo, struct radeon_winsys_bo *dst_bo,
                 uint64_t src_offset, uint64_t dst_offset, uint64_t size)
{
   bool use_compute = !(size & 3) && !(src_offset & 3) && !(dst_offset & 3) &&
                      radv_prefer_compute_dma(cmd_buffer->device, size, src_bo, dst_bo);

   uint64_t src_va = radv_buffer_get_va(src_bo) + src_offset;
   uint64_t dst_va = radv_buffer_get_va(dst_bo) + dst_offset;

   radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, src_bo);
   radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_bo);

   if (cmd_buffer->qf == RADV_QUEUE_TRANSFER)
      radv_sdma_copy_buffer(cmd_buffer->device, cmd_buffer->cs, src_va, dst_va, size);
   else if (use_compute)
      copy_buffer_shader(cmd_buffer, src_va, dst_va, size);
   else if (size)
      radv_cp_dma_buffer_copy(cmd_buffer, src_va, dst_va, size);
}

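/* vkCmdFillBuffer: the fill size is clamped to the buffer range (resolving
 * VK_WHOLE_SIZE) and rounded down to a multiple of 4 before being handed to
 * radv_fill_buffer().
 */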
VKAPI_ATTR void VKAPI_CALL
radv_CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize fillSize,
                   uint32_t data)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);

   fillSize = vk_buffer_range(&dst_buffer->vk, dstOffset, fillSize) & ~3ull;

   radv_fill_buffer(cmd_buffer, NULL, dst_buffer->bo,
                    radv_buffer_get_va(dst_buffer->bo) + dst_buffer->offset + dstOffset, fillSize, data);
}

static void
copy_buffer(struct radv_cmd_buffer *cmd_buffer, struct radv_buffer *src_buffer, struct radv_buffer *dst_buffer,
            const VkBufferCopy2 *region)
{
   bool old_predicating;

   /* VK_EXT_conditional_rendering says that copy commands should not be
    * affected by conditional rendering.
    */
   old_predicating = cmd_buffer->state.predicating;
   cmd_buffer->state.predicating = false;

   radv_copy_buffer(cmd_buffer, src_buffer->bo, dst_buffer->bo, src_buffer->offset + region->srcOffset,
                    dst_buffer->offset + region->dstOffset, region->size);

   /* Restore conditional rendering. */
   cmd_buffer->state.predicating = old_predicating;
}

VKAPI_ATTR void VKAPI_CALL
radv_CmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCopyBufferInfo)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_buffer, src_buffer, pCopyBufferInfo->srcBuffer);
   RADV_FROM_HANDLE(radv_buffer, dst_buffer, pCopyBufferInfo->dstBuffer);

   for (unsigned r = 0; r < pCopyBufferInfo->regionCount; r++) {
      copy_buffer(cmd_buffer, src_buffer, dst_buffer, &pCopyBufferInfo->pRegions[r]);
   }
}

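/* Write 'size' bytes of 'data' inline through the command processor with a
 * single WRITE_DATA packet. Only meant for small updates (below
 * RADV_BUFFER_UPDATE_THRESHOLD), since the data is embedded in the command
 * stream itself.
 */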
void
radv_update_buffer_cp(struct radv_cmd_buffer *cmd_buffer, uint64_t va, const void *data, uint64_t size)
{
   uint64_t words = size / 4;
   bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);

   assert(size < RADV_BUFFER_UPDATE_THRESHOLD);

   radv_emit_cache_flush(cmd_buffer);
   radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, words + 4);

   radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + words, 0));
   radeon_emit(cmd_buffer->cs,
               S_370_DST_SEL(mec ? V_370_MEM : V_370_MEM_GRBM) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(V_370_ME));
   radeon_emit(cmd_buffer->cs, va);
   radeon_emit(cmd_buffer->cs, va >> 32);
   radeon_emit_array(cmd_buffer->cs, data, words);

   if (radv_device_fault_detection_enabled(cmd_buffer->device))
      radv_cmd_buffer_trace_emit(cmd_buffer);
}

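/* vkCmdUpdateBuffer: small updates on non-transfer queues are written inline
 * through the CP; larger updates (or transfer queues) stage the data in the
 * upload BO and copy from there.
 */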
VKAPI_ATTR void VKAPI_CALL
radv_CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize,
                     const void *pData)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
   uint64_t va = radv_buffer_get_va(dst_buffer->bo);
   va += dstOffset + dst_buffer->offset;

   assert(!(dataSize & 3));
   assert(!(va & 3));

   if (!dataSize)
      return;

   if (dataSize < RADV_BUFFER_UPDATE_THRESHOLD && cmd_buffer->qf != RADV_QUEUE_TRANSFER) {
      radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);
      radv_update_buffer_cp(cmd_buffer, va, pData, dataSize);
   } else {
      uint32_t buf_offset;
      radv_cmd_buffer_upload_data(cmd_buffer, dataSize, pData, &buf_offset);
      radv_copy_buffer(cmd_buffer, cmd_buffer->upload.upload_bo, dst_buffer->bo, buf_offset,
                       dstOffset + dst_buffer->offset, dataSize);
   }
}