1 /*
2  * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3  * Copyright 2015-2021 Advanced Micro Devices, Inc.
4  * Copyright 2023 Valve Corporation
5  * All Rights Reserved.
6  *
7  * SPDX-License-Identifier: MIT
8  */
9 
10 #include "radv_sdma.h"
11 #include "util/macros.h"
12 #include "util/u_memory.h"
13 #include "radv_buffer.h"
14 #include "radv_cs.h"
15 #include "radv_formats.h"
16 
17 #include "ac_formats.h"
18 
19 struct radv_sdma_chunked_copy_info {
20    unsigned extent_horizontal_blocks;
21    unsigned extent_vertical_blocks;
22    unsigned aligned_row_pitch;
23    unsigned num_rows_per_copy;
24 };
25 
26 static const VkExtent3D radv_sdma_t2t_alignment_2d_and_planar[] = {
27    {16, 16, 1}, /* 1 bpp */
28    {16, 8, 1},  /* 2 bpp */
29    {8, 8, 1},   /* 4 bpp */
30    {8, 4, 1},   /* 8 bpp */
31    {4, 4, 1},   /* 16 bpp */
32 };
33 
34 static const VkExtent3D radv_sdma_t2t_alignment_3d[] = {
35    {8, 4, 8}, /* 1 bpp */
36    {4, 4, 8}, /* 2 bpp */
37    {4, 4, 4}, /* 4 bpp */
38    {4, 2, 4}, /* 8 bpp */
39    {2, 2, 4}, /* 16 bpp */
40 };
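/* Note: the two tables above are indexed by the base-2 log of the copy element
 * size (1 bpp -> index 0, 2 bpp -> 1, ..., 16 bpp -> 4), matching the lookup
 * with util_logbase2(src->bpp) in radv_sdma_use_t2t_scanline_copy below. Each
 * entry gives the minimum offset/extent alignment (in blocks) that the T2T
 * sub-window packet can handle for that element size.
 */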
41 
42 ALWAYS_INLINE static unsigned
43 radv_sdma_pitch_alignment(const struct radv_device *device, const unsigned bpp)
44 {
45    const struct radv_physical_device *pdev = radv_device_physical(device);
46 
47    if (pdev->info.sdma_ip_version >= SDMA_5_0)
48       return MAX2(1, 4 / bpp);
49 
50    return 4;
51 }
52 
53 ALWAYS_INLINE static void
54 radv_sdma_check_pitches(const unsigned pitch, const unsigned slice_pitch, const unsigned bpp, const bool uses_depth)
55 {
56    ASSERTED const unsigned pitch_alignment = MAX2(1, 4 / bpp);
57    assert(pitch);
58    assert(pitch <= (1 << 14));
59    assert(util_is_aligned(pitch, pitch_alignment));
60 
61    if (uses_depth) {
62       ASSERTED const unsigned slice_pitch_alignment = 4;
63       assert(slice_pitch);
64       assert(slice_pitch <= (1 << 28));
65       assert(util_is_aligned(slice_pitch, slice_pitch_alignment));
66    }
67 }
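/* The pitch limit of 1 << 14 checked above matches the 14-bit src/dst_pitch
 * field of the SDMA v2.4 sub-window packets; the driver deliberately sticks to
 * the smallest limits so the same path works on every supported SDMA version
 * (see the comment in radv_sdma_emit_copy_linear_sub_window below).
 */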
68 
69 ALWAYS_INLINE static enum gfx9_resource_type
70 radv_sdma_surface_resource_type(const struct radv_device *const device, const struct radeon_surf *const surf)
71 {
72    const struct radv_physical_device *pdev = radv_device_physical(device);
73 
74    if (pdev->info.sdma_ip_version >= SDMA_5_0) {
75       /* Use the 2D resource type for rotated or Z swizzles. */
76       if ((surf->u.gfx9.resource_type == RADEON_RESOURCE_1D || surf->u.gfx9.resource_type == RADEON_RESOURCE_3D) &&
77           (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER || surf->micro_tile_mode == RADEON_MICRO_MODE_DEPTH))
78          return RADEON_RESOURCE_2D;
79    }
80 
81    return surf->u.gfx9.resource_type;
82 }
83 
84 ALWAYS_INLINE static uint32_t
85 radv_sdma_surface_type_from_aspect_mask(const VkImageAspectFlags aspectMask)
86 {
87    if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT)
88       return 1;
89    else if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)
90       return 2;
91 
92    return 0;
93 }
94 
95 ALWAYS_INLINE static VkExtent3D
96 radv_sdma_pixel_extent_to_blocks(const VkExtent3D extent, const unsigned blk_w, const unsigned blk_h)
97 {
98    const VkExtent3D r = {
99       .width = DIV_ROUND_UP(extent.width, blk_w),
100       .height = DIV_ROUND_UP(extent.height, blk_h),
101       .depth = extent.depth,
102    };
103 
104    return r;
105 }
106 
107 ALWAYS_INLINE static VkOffset3D
108 radv_sdma_pixel_offset_to_blocks(const VkOffset3D offset, const unsigned blk_w, const unsigned blk_h)
109 {
110    const VkOffset3D r = {
111       .x = DIV_ROUND_UP(offset.x, blk_w),
112       .y = DIV_ROUND_UP(offset.y, blk_h),
113       .z = offset.z,
114    };
115 
116    return r;
117 }
118 
119 ALWAYS_INLINE static unsigned
120 radv_sdma_pixels_to_blocks(const unsigned linear_pitch, const unsigned blk_w)
121 {
122    return DIV_ROUND_UP(linear_pitch, blk_w);
123 }
124 
125 ALWAYS_INLINE static unsigned
126 radv_sdma_pixel_area_to_blocks(const unsigned linear_slice_pitch, const unsigned blk_w, const unsigned blk_h)
127 {
128    return DIV_ROUND_UP(DIV_ROUND_UP(linear_slice_pitch, blk_w), blk_h);
129 }
130 
131 static struct radv_sdma_chunked_copy_info
132 radv_sdma_get_chunked_copy_info(const struct radv_device *const device, const struct radv_sdma_surf *const img,
133                                 const VkExtent3D extent)
134 {
135    const unsigned extent_horizontal_blocks = DIV_ROUND_UP(extent.width, img->blk_w);
136    const unsigned extent_vertical_blocks = DIV_ROUND_UP(extent.height, img->blk_h);
137    const unsigned aligned_row_pitch = ALIGN(extent_horizontal_blocks, 4);
138    const unsigned aligned_row_bytes = aligned_row_pitch * img->bpp;
139 
140    /* Assume that we can always copy at least one full row at a time. */
141    const unsigned max_num_rows_per_copy = MIN2(RADV_SDMA_TRANSFER_TEMP_BYTES / aligned_row_bytes, extent.height);
142    assert(max_num_rows_per_copy);
143 
144    /* Ensure that the number of rows copied at a time is a power of two. */
145    const unsigned num_rows_per_copy = MAX2(1, util_next_power_of_two(max_num_rows_per_copy + 1) / 2);
146 
147    const struct radv_sdma_chunked_copy_info r = {
148       .extent_horizontal_blocks = extent_horizontal_blocks,
149       .extent_vertical_blocks = extent_vertical_blocks,
150       .aligned_row_pitch = aligned_row_pitch,
151       .num_rows_per_copy = num_rows_per_copy,
152    };
153 
154    return r;
155 }
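/* Example: copying 300 rows of 256 horizontal blocks at 4 bpp gives
 * aligned_row_pitch = 256 blocks and aligned_row_bytes = 1024. If the staging
 * buffer (RADV_SDMA_TRANSFER_TEMP_BYTES) is 64 KiB, at most 65536 / 1024 = 64
 * rows fit per chunk; util_next_power_of_two(64 + 1) / 2 rounds that down to
 * the nearest power of two, so num_rows_per_copy = 64 and the callers split
 * the copy into ceil(300 / 64) = 5 chunks per slice.
 */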
156 
157 static uint32_t
158 radv_sdma_get_bpe(const struct radv_image *const image, VkImageAspectFlags aspect_mask)
159 {
160    const unsigned plane_idx = radv_plane_from_aspect(aspect_mask);
161    const struct radeon_surf *surf = &image->planes[plane_idx].surface;
162    const bool is_stencil_only = aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT;
163 
164    return is_stencil_only ? 1 : surf->bpe;
165 }
166 
167 struct radv_sdma_surf
168 radv_sdma_get_buf_surf(const struct radv_buffer *const buffer, const struct radv_image *const image,
169                        const VkBufferImageCopy2 *const region, const VkImageAspectFlags aspect_mask)
170 {
171    assert(util_bitcount(aspect_mask) == 1);
172 
173    const unsigned pitch = (region->bufferRowLength ? region->bufferRowLength : region->imageExtent.width);
174    const unsigned slice_pitch =
175       (region->bufferImageHeight ? region->bufferImageHeight : region->imageExtent.height) * pitch;
176 
177    const unsigned plane_idx = radv_plane_from_aspect(region->imageSubresource.aspectMask);
178    const struct radeon_surf *surf = &image->planes[plane_idx].surface;
179    const uint32_t bpe = radv_sdma_get_bpe(image, region->imageSubresource.aspectMask);
180 
181    const struct radv_sdma_surf info = {
182       .va = radv_buffer_get_va(buffer->bo) + buffer->offset + region->bufferOffset,
183       .pitch = pitch,
184       .slice_pitch = slice_pitch,
185       .bpp = bpe,
186       .blk_w = surf->blk_w,
187       .blk_h = surf->blk_h,
188       .is_linear = true,
189    };
190 
191    return info;
192 }
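/* Per the Vulkan spec, bufferRowLength/bufferImageHeight equal to zero mean the
 * buffer is tightly packed, which is why the image extent is used as a fallback
 * above. The pitch and slice pitch computed here are in texels; they are only
 * converted to blocks later, via radv_sdma_pixels_to_blocks and
 * radv_sdma_pixel_area_to_blocks, when the copy packets are emitted.
 */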
193 
194 static uint32_t
195 radv_sdma_get_metadata_config(const struct radv_device *const device, const struct radv_image *const image,
196                               const struct radeon_surf *const surf, const VkImageSubresourceLayers subresource,
197                               const VkImageAspectFlags aspect_mask)
198 {
199    const struct radv_physical_device *pdev = radv_device_physical(device);
200 
201    if (!pdev->info.sdma_supports_compression ||
202        !(radv_dcc_enabled(image, subresource.mipLevel) || radv_image_has_htile(image))) {
203       return 0;
204    }
205 
206    const VkFormat format = vk_format_get_aspect_format(image->vk.format, aspect_mask);
207    const struct util_format_description *desc = vk_format_description(format);
208 
209    const uint32_t data_format = ac_get_cb_format(pdev->info.gfx_level, radv_format_to_pipe_format(format));
210    const uint32_t alpha_is_on_msb = ac_alpha_is_on_msb(&pdev->info, radv_format_to_pipe_format(format));
211    const uint32_t number_type = radv_translate_buffer_numformat(desc, vk_format_get_first_non_void_channel(format));
212    const uint32_t surface_type = radv_sdma_surface_type_from_aspect_mask(aspect_mask);
213    const uint32_t max_comp_block_size = surf->u.gfx9.color.dcc.max_compressed_block_size;
214    const uint32_t max_uncomp_block_size = radv_get_dcc_max_uncompressed_block_size(device, image);
215    const uint32_t pipe_aligned = surf->u.gfx9.color.dcc.pipe_aligned;
216 
217    return data_format | alpha_is_on_msb << 8 | number_type << 9 | surface_type << 12 | max_comp_block_size << 24 |
218           max_uncomp_block_size << 26 | pipe_aligned << 31;
219 }
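/* The returned dword packs the metadata configuration needed for compressed
 * copies: data format in the low bits, then alpha-on-MSB (bit 8), number type
 * (bit 9+), surface type (bit 12+, where 0 = color, 1 = depth, 2 = stencil),
 * max compressed and uncompressed block sizes (bits 24+ and 26+), and
 * pipe-aligned in bit 31, mirroring the shifts in the return expression above.
 */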
220 
221 static uint32_t
222 radv_sdma_get_tiled_info_dword(const struct radv_device *const device, const struct radv_image *const image,
223                                const struct radeon_surf *const surf, const VkImageSubresourceLayers subresource)
224 {
225    const struct radv_physical_device *pdev = radv_device_physical(device);
226    const uint32_t bpe = radv_sdma_get_bpe(image, subresource.aspectMask);
227    const uint32_t element_size = util_logbase2(bpe);
228    const uint32_t swizzle_mode = surf->has_stencil ? surf->u.gfx9.zs.stencil_swizzle_mode : surf->u.gfx9.swizzle_mode;
229    uint32_t info = element_size | swizzle_mode << 3;
230    const enum sdma_version ver = pdev->info.sdma_ip_version;
231 
232    if (ver < SDMA_7_0) {
233       const enum gfx9_resource_type dimension = radv_sdma_surface_resource_type(device, surf);
234       info |= dimension << 9;
235    }
236 
237    if (ver >= SDMA_5_0) {
238       const uint32_t mip_max = MAX2(image->vk.mip_levels, 1);
239       const uint32_t mip_id = subresource.mipLevel;
240 
241       return info | (mip_max - 1) << 16 | mip_id << 20;
242    } else if (ver >= SDMA_4_0) {
243       return info | surf->u.gfx9.epitch << 16;
244    } else {
245       unreachable("unsupported SDMA version");
246    }
247 }
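/* Layout of the info dword built above: element size (log2 of bytes per
 * element) in the low bits, swizzle mode starting at bit 3, and for SDMA < 7.0
 * the resource dimension at bit 9. SDMA >= 5.0 additionally stores the mip
 * count minus one at bit 16 and the mip id at bit 20, while SDMA 4.x stores
 * the surface epitch at bit 16 instead.
 */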
248 
249 static uint32_t
250 radv_sdma_get_tiled_header_dword(const struct radv_device *const device, const struct radv_image *const image,
251                                  const VkImageSubresourceLayers subresource)
252 {
253    const struct radv_physical_device *pdev = radv_device_physical(device);
254    const enum sdma_version ver = pdev->info.sdma_ip_version;
255 
256    if (ver >= SDMA_5_0) {
257       return 0;
258    } else if (ver >= SDMA_4_0) {
259       const uint32_t mip_max = MAX2(image->vk.mip_levels, 1);
260       const uint32_t mip_id = subresource.mipLevel;
261       return (mip_max - 1) << 20 | mip_id << 24;
262    } else {
263       unreachable("unsupported SDMA version");
264    }
265 }
266 
267 struct radv_sdma_surf
268 radv_sdma_get_surf(const struct radv_device *const device, const struct radv_image *const image,
269                    const VkImageSubresourceLayers subresource, const VkOffset3D offset,
270                    const VkImageAspectFlags aspect_mask)
271 {
272    assert(util_bitcount(aspect_mask) == 1);
273 
274    const struct radv_physical_device *pdev = radv_device_physical(device);
275    const unsigned plane_idx = radv_plane_from_aspect(aspect_mask);
276    const unsigned binding_idx = image->disjoint ? plane_idx : 0;
277    const struct radeon_surf *const surf = &image->planes[plane_idx].surface;
278    const uint64_t va = radv_image_get_va(image, binding_idx);
279    const uint32_t bpe = radv_sdma_get_bpe(image, aspect_mask);
280    struct radv_sdma_surf info = {
281       .extent =
282          {
283             .width = vk_format_get_plane_width(image->vk.format, plane_idx, image->vk.extent.width),
284             .height = vk_format_get_plane_height(image->vk.format, plane_idx, image->vk.extent.height),
285             .depth = image->vk.image_type == VK_IMAGE_TYPE_3D ? image->vk.extent.depth : image->vk.array_layers,
286          },
287       .offset =
288          {
289             .x = offset.x,
290             .y = offset.y,
291             .z = image->vk.image_type == VK_IMAGE_TYPE_3D ? offset.z : subresource.baseArrayLayer,
292          },
293       .bpp = bpe,
294       .blk_w = surf->blk_w,
295       .blk_h = surf->blk_h,
296       .mip_levels = image->vk.mip_levels,
297       .micro_tile_mode = surf->micro_tile_mode,
298       .is_linear = surf->is_linear,
299       .is_3d = surf->u.gfx9.resource_type == RADEON_RESOURCE_3D,
300    };
301 
302    const uint64_t surf_offset =
303       (aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) ? surf->u.gfx9.zs.stencil_offset : surf->u.gfx9.surf_offset;
304 
305    if (surf->is_linear) {
306       info.va = va + surf_offset + surf->u.gfx9.offset[subresource.mipLevel];
307       info.pitch = surf->u.gfx9.pitch[subresource.mipLevel];
308       info.slice_pitch = surf->blk_w * surf->blk_h * surf->u.gfx9.surf_slice_size / bpe;
309    } else {
310       /* 1D resources should be linear. */
311       assert(surf->u.gfx9.resource_type != RADEON_RESOURCE_1D);
312 
313       info.va = (va + surf_offset) | surf->tile_swizzle << 8;
314 
315       info.info_dword = radv_sdma_get_tiled_info_dword(device, image, surf, subresource);
316       info.header_dword = radv_sdma_get_tiled_header_dword(device, image, subresource);
317 
318       if (pdev->info.sdma_supports_compression &&
319           (radv_dcc_enabled(image, subresource.mipLevel) || radv_image_has_htile(image))) {
320          info.meta_va = va + surf->meta_offset;
321          info.meta_config = radv_sdma_get_metadata_config(device, image, surf, subresource, aspect_mask);
322       }
323    }
324 
325    return info;
326 }
327 
328 static void
329 radv_sdma_emit_nop(const struct radv_device *device, struct radeon_cmdbuf *cs)
330 {
331    /* SDMA NOP acts as a fence command and causes the SDMA engine to wait for pending copy operations. */
332    radeon_check_space(device->ws, cs, 1);
333    radeon_emit(cs, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
334 }
335 
336 void
337 radv_sdma_copy_buffer(const struct radv_device *device, struct radeon_cmdbuf *cs, uint64_t src_va, uint64_t dst_va,
338                       uint64_t size)
339 {
340    if (size == 0)
341       return;
342 
343    const struct radv_physical_device *pdev = radv_device_physical(device);
344    const enum sdma_version ver = pdev->info.sdma_ip_version;
345    const unsigned max_size_per_packet = ver >= SDMA_5_2 ? SDMA_V5_2_COPY_MAX_BYTES : SDMA_V2_0_COPY_MAX_BYTES;
346 
347    unsigned align = ~0u;
348    unsigned ncopy = DIV_ROUND_UP(size, max_size_per_packet);
349 
350    assert(ver >= SDMA_2_0);
351 
352    /* SDMA FW automatically enables a faster dword copy mode when
353     * source, destination and size are all dword-aligned.
354     *
355     * When source and destination are dword-aligned, round down the size to
356     * take advantage of faster dword copy, and copy the remaining few bytes
357     * with the last copy packet.
358     */
359    if ((src_va & 0x3) == 0 && (dst_va & 0x3) == 0 && size > 4 && (size & 0x3) != 0) {
360       align = ~0x3u;
361       ncopy++;
362    }
363 
364    radeon_check_space(device->ws, cs, ncopy * 7);
365 
366    for (unsigned i = 0; i < ncopy; i++) {
367       unsigned csize = size >= 4 ? MIN2(size & align, max_size_per_packet) : size;
368       radeon_emit(cs, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
369       radeon_emit(cs, ver >= SDMA_4_0 ? csize - 1 : csize);
370       radeon_emit(cs, 0); /* src/dst endian swap */
371       radeon_emit(cs, src_va);
372       radeon_emit(cs, src_va >> 32);
373       radeon_emit(cs, dst_va);
374       radeon_emit(cs, dst_va >> 32);
375       dst_va += csize;
376       src_va += csize;
377       size -= csize;
378    }
379 }
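/* Example: with dword-aligned src_va and dst_va and size = 1027 bytes, the
 * first packet copies 1027 & ~3 = 1024 bytes and benefits from the faster
 * dword copy mode, and the extra packet accounted for by the ncopy++ above
 * copies the remaining 3 bytes. Copies with unaligned addresses are simply
 * split into max_size_per_packet-sized pieces.
 */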
380 
381 void
382 radv_sdma_fill_buffer(const struct radv_device *device, struct radeon_cmdbuf *cs, const uint64_t va,
383                       const uint64_t size, const uint32_t value)
384 {
385    const struct radv_physical_device *pdev = radv_device_physical(device);
386 
387    const uint32_t fill_size = 2; /* This means that the count is in dwords. */
388    const uint32_t constant_fill_header = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0) | (fill_size & 0x3) << 30;
389 
390    /* This packet is the same since SDMA v2.4, haven't bothered to check older versions. */
391    const enum sdma_version ver = pdev->info.sdma_ip_version;
392    assert(ver >= SDMA_2_4);
393 
394    /* Maximum allowed fill size depends on the GPU.
395     * Emit as many packets as necessary to fill all the bytes we need.
396     */
397    const uint64_t max_fill_bytes = BITFIELD64_MASK(ver >= SDMA_6_0 ? 30 : 22) & ~0x3;
398    const unsigned num_packets = DIV_ROUND_UP(size, max_fill_bytes);
399    ASSERTED unsigned cdw_max = radeon_check_space(device->ws, cs, num_packets * 5);
400 
401    for (unsigned i = 0; i < num_packets; ++i) {
402       const uint64_t offset = i * max_fill_bytes;
403       const uint64_t fill_bytes = MIN2(size - offset, max_fill_bytes);
404       const uint64_t fill_va = va + offset;
405 
406       radeon_emit(cs, constant_fill_header);
407       radeon_emit(cs, fill_va);
408       radeon_emit(cs, fill_va >> 32);
409       radeon_emit(cs, value);
410       radeon_emit(cs, fill_bytes - 1); /* Must be programmed in bytes, even if the fill is done in dwords. */
411    }
412 
413    assert(cs->cdw <= cdw_max);
414 }
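/* Example: clearing 16 MiB on an SDMA version before 6.0, where the fill size
 * is limited to 22 bits, allows (2^22 - 1) & ~3 = 4194300 bytes per packet, so
 * the loop above emits ceil(16777216 / 4194300) = 5 fill packets. On SDMA 6.0+
 * the 30-bit limit fits the whole clear into a single packet.
 */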
415 
416 static void
417 radv_sdma_emit_copy_linear_sub_window(const struct radv_device *device, struct radeon_cmdbuf *cs,
418                                       const struct radv_sdma_surf *const src, const struct radv_sdma_surf *const dst,
419                                       const VkExtent3D pix_extent)
420 {
421    /* This packet is the same since SDMA v2.4, haven't bothered to check older versions.
422     * The main difference is the bitfield sizes:
423     *
424     * v2.4 - src/dst_pitch: 14 bits, rect_z: 11 bits
425     * v4.0 - src/dst_pitch: 19 bits, rect_z: 11 bits
426     * v5.0 - src/dst_pitch: 19 bits, rect_z: 13 bits
427     *
428     * We currently use the smallest limits (from SDMA v2.4).
429     */
430 
431    const VkOffset3D src_off = radv_sdma_pixel_offset_to_blocks(src->offset, src->blk_w, src->blk_h);
432    const VkOffset3D dst_off = radv_sdma_pixel_offset_to_blocks(dst->offset, dst->blk_w, dst->blk_h);
433    const VkExtent3D ext = radv_sdma_pixel_extent_to_blocks(pix_extent, src->blk_w, src->blk_h);
434    const unsigned src_pitch = radv_sdma_pixels_to_blocks(src->pitch, src->blk_w);
435    const unsigned dst_pitch = radv_sdma_pixels_to_blocks(dst->pitch, dst->blk_w);
436    const unsigned src_slice_pitch = radv_sdma_pixel_area_to_blocks(src->slice_pitch, src->blk_w, src->blk_h);
437    const unsigned dst_slice_pitch = radv_sdma_pixel_area_to_blocks(dst->slice_pitch, dst->blk_w, dst->blk_h);
438 
439    assert(src->bpp == dst->bpp);
440    assert(util_is_power_of_two_nonzero(src->bpp));
441    radv_sdma_check_pitches(src->pitch, src->slice_pitch, src->bpp, false);
442    radv_sdma_check_pitches(dst->pitch, dst->slice_pitch, dst->bpp, false);
443 
444    ASSERTED unsigned cdw_end = radeon_check_space(device->ws, cs, 13);
445 
446    radeon_emit(cs, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW, 0) | util_logbase2(src->bpp)
447                                                                                                  << 29);
448    radeon_emit(cs, src->va);
449    radeon_emit(cs, src->va >> 32);
450    radeon_emit(cs, src_off.x | src_off.y << 16);
451    radeon_emit(cs, src_off.z | (src_pitch - 1) << 13);
452    radeon_emit(cs, src_slice_pitch - 1);
453    radeon_emit(cs, dst->va);
454    radeon_emit(cs, dst->va >> 32);
455    radeon_emit(cs, dst_off.x | dst_off.y << 16);
456    radeon_emit(cs, dst_off.z | (dst_pitch - 1) << 13);
457    radeon_emit(cs, dst_slice_pitch - 1);
458    radeon_emit(cs, (ext.width - 1) | (ext.height - 1) << 16);
459    radeon_emit(cs, (ext.depth - 1));
460 
461    assert(cs->cdw == cdw_end);
462 }
463 
464 static void
465 radv_sdma_emit_copy_tiled_sub_window(const struct radv_device *device, struct radeon_cmdbuf *cs,
466                                      const struct radv_sdma_surf *const tiled,
467                                      const struct radv_sdma_surf *const linear, const VkExtent3D pix_extent,
468                                      const bool detile)
469 {
470    const struct radv_physical_device *pdev = radv_device_physical(device);
471 
472    if (!pdev->info.sdma_supports_compression) {
473       assert(!tiled->meta_va);
474    }
475 
476    const VkOffset3D linear_off = radv_sdma_pixel_offset_to_blocks(linear->offset, linear->blk_w, linear->blk_h);
477    const VkOffset3D tiled_off = radv_sdma_pixel_offset_to_blocks(tiled->offset, tiled->blk_w, tiled->blk_h);
478    const VkExtent3D tiled_ext = radv_sdma_pixel_extent_to_blocks(tiled->extent, tiled->blk_w, tiled->blk_h);
479    const VkExtent3D ext = radv_sdma_pixel_extent_to_blocks(pix_extent, tiled->blk_w, tiled->blk_h);
480    const unsigned linear_pitch = radv_sdma_pixels_to_blocks(linear->pitch, tiled->blk_w);
481    const unsigned linear_slice_pitch = radv_sdma_pixel_area_to_blocks(linear->slice_pitch, tiled->blk_w, tiled->blk_h);
482    const bool dcc = !!tiled->meta_va;
483    const bool uses_depth = linear_off.z != 0 || tiled_off.z != 0 || ext.depth != 1;
484 
485    assert(util_is_power_of_two_nonzero(tiled->bpp));
486    radv_sdma_check_pitches(linear_pitch, linear_slice_pitch, tiled->bpp, uses_depth);
487 
488    ASSERTED unsigned cdw_end = radeon_check_space(device->ws, cs, 14 + (dcc ? 3 : 0));
489 
490    radeon_emit(cs, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW, 0) | dcc << 19 | detile << 31 |
491                       tiled->header_dword);
492    radeon_emit(cs, tiled->va);
493    radeon_emit(cs, tiled->va >> 32);
494    radeon_emit(cs, tiled_off.x | tiled_off.y << 16);
495    radeon_emit(cs, tiled_off.z | (tiled_ext.width - 1) << 16);
496    radeon_emit(cs, (tiled_ext.height - 1) | (tiled_ext.depth - 1) << 16);
497    radeon_emit(cs, tiled->info_dword);
498    radeon_emit(cs, linear->va);
499    radeon_emit(cs, linear->va >> 32);
500    radeon_emit(cs, linear_off.x | linear_off.y << 16);
501    radeon_emit(cs, linear_off.z | (linear_pitch - 1) << 16);
502    radeon_emit(cs, linear_slice_pitch - 1);
503    radeon_emit(cs, (ext.width - 1) | (ext.height - 1) << 16);
504    radeon_emit(cs, (ext.depth - 1));
505 
506    if (tiled->meta_va) {
507       const unsigned write_compress_enable = !detile;
508       radeon_emit(cs, tiled->meta_va);
509       radeon_emit(cs, tiled->meta_va >> 32);
510       radeon_emit(cs, tiled->meta_config | write_compress_enable << 28);
511    }
512 
513    assert(cs->cdw == cdw_end);
514 }
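/* The detile bit (bit 31 of the header dword above) selects the copy direction:
 * when set, the tiled surface is the source and the linear surface is the
 * destination; when clear, the copy writes into the tiled surface. Callers pass
 * !to_image accordingly. write_compress_enable only applies when writing the
 * compressed (tiled) surface, which is why it is tied to !detile above.
 */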
515 
516 static void
517 radv_sdma_emit_copy_t2t_sub_window(const struct radv_device *device, struct radeon_cmdbuf *cs,
518                                    const struct radv_sdma_surf *const src, const struct radv_sdma_surf *const dst,
519                                    const VkExtent3D px_extent)
520 {
521    const struct radv_physical_device *pdev = radv_device_physical(device);
522 
523    /* We currently only support the SDMA v4+ versions of this packet. */
524    assert(pdev->info.sdma_ip_version >= SDMA_4_0);
525 
526    /* On GFX10+ this supports DCC, but cannot copy a compressed surface to another compressed surface. */
527    assert(!src->meta_va || !dst->meta_va);
528 
529    if (pdev->info.sdma_ip_version >= SDMA_4_0 && pdev->info.sdma_ip_version < SDMA_5_0) {
530       /* SDMA v4 doesn't support mip_id selection in the T2T copy packet. */
531       assert(src->header_dword >> 24 == 0);
532       assert(dst->header_dword >> 24 == 0);
533       /* SDMA v4 doesn't support any image metadata. */
534       assert(!src->meta_va);
535       assert(!dst->meta_va);
536    }
537 
538    /* Despite the name, this can indicate DCC or HTILE metadata. */
539    const uint32_t dcc = src->meta_va || dst->meta_va;
540    /* 0 = compress (src is uncompressed), 1 = decompress (src is compressed). */
541    const uint32_t dcc_dir = src->meta_va && !dst->meta_va;
542 
543    const VkOffset3D src_off = radv_sdma_pixel_offset_to_blocks(src->offset, src->blk_w, src->blk_h);
544    const VkOffset3D dst_off = radv_sdma_pixel_offset_to_blocks(dst->offset, dst->blk_w, dst->blk_h);
545    const VkExtent3D src_ext = radv_sdma_pixel_extent_to_blocks(src->extent, src->blk_w, src->blk_h);
546    const VkExtent3D dst_ext = radv_sdma_pixel_extent_to_blocks(dst->extent, dst->blk_w, dst->blk_h);
547    const VkExtent3D ext = radv_sdma_pixel_extent_to_blocks(px_extent, src->blk_w, src->blk_h);
548 
549    assert(util_is_power_of_two_nonzero(src->bpp));
550    assert(util_is_power_of_two_nonzero(dst->bpp));
551 
552    ASSERTED unsigned cdw_end = radeon_check_space(device->ws, cs, 15 + (dcc ? 3 : 0));
553 
554    radeon_emit(cs, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW, 0) | dcc << 19 | dcc_dir << 31 |
555                       src->header_dword);
556    radeon_emit(cs, src->va);
557    radeon_emit(cs, src->va >> 32);
558    radeon_emit(cs, src_off.x | src_off.y << 16);
559    radeon_emit(cs, src_off.z | (src_ext.width - 1) << 16);
560    radeon_emit(cs, (src_ext.height - 1) | (src_ext.depth - 1) << 16);
561    radeon_emit(cs, src->info_dword);
562    radeon_emit(cs, dst->va);
563    radeon_emit(cs, dst->va >> 32);
564    radeon_emit(cs, dst_off.x | dst_off.y << 16);
565    radeon_emit(cs, dst_off.z | (dst_ext.width - 1) << 16);
566    radeon_emit(cs, (dst_ext.height - 1) | (dst_ext.depth - 1) << 16);
567    radeon_emit(cs, dst->info_dword);
568    radeon_emit(cs, (ext.width - 1) | (ext.height - 1) << 16);
569    radeon_emit(cs, (ext.depth - 1));
570 
571    if (dst->meta_va) {
572       const uint32_t write_compress_enable = 1;
573       radeon_emit(cs, dst->meta_va);
574       radeon_emit(cs, dst->meta_va >> 32);
575       radeon_emit(cs, dst->meta_config | write_compress_enable << 28);
576    } else if (src->meta_va) {
577       radeon_emit(cs, src->meta_va);
578       radeon_emit(cs, src->meta_va >> 32);
579       radeon_emit(cs, src->meta_config);
580    }
581 
582    assert(cs->cdw == cdw_end);
583 }
584 
585 void
586 radv_sdma_copy_buffer_image(const struct radv_device *device, struct radeon_cmdbuf *cs,
587                             const struct radv_sdma_surf *buf, const struct radv_sdma_surf *img, const VkExtent3D extent,
588                             bool to_image)
589 {
590    if (img->is_linear) {
591       if (to_image)
592          radv_sdma_emit_copy_linear_sub_window(device, cs, buf, img, extent);
593       else
594          radv_sdma_emit_copy_linear_sub_window(device, cs, img, buf, extent);
595    } else {
596       radv_sdma_emit_copy_tiled_sub_window(device, cs, img, buf, extent, !to_image);
597    }
598 }
599 
600 bool
601 radv_sdma_use_unaligned_buffer_image_copy(const struct radv_device *device, const struct radv_sdma_surf *buf,
602                                           const struct radv_sdma_surf *img, const VkExtent3D ext)
603 {
604    const unsigned pitch_blocks = radv_sdma_pixels_to_blocks(buf->pitch, img->blk_w);
605    if (!util_is_aligned(pitch_blocks, radv_sdma_pitch_alignment(device, img->bpp)))
606       return true;
607 
608    const bool uses_depth = img->offset.z != 0 || ext.depth != 1;
609    if (!img->is_linear && uses_depth) {
610       const unsigned slice_pitch_blocks = radv_sdma_pixel_area_to_blocks(buf->slice_pitch, img->blk_w, img->blk_h);
611       if (!util_is_aligned(slice_pitch_blocks, 4))
612          return true;
613    }
614 
615    return false;
616 }
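/* Example: copying a 10-pixel-wide region of an R8_UNORM image to a tightly
 * packed buffer gives a buffer pitch of 10 blocks. With a 1-byte texel the
 * required pitch alignment is 4 blocks (see radv_sdma_pitch_alignment), so the
 * copy falls back to the unaligned path below, which stages the rows through a
 * temporary buffer.
 */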
617 
618 void
619 radv_sdma_copy_buffer_image_unaligned(const struct radv_device *device, struct radeon_cmdbuf *cs,
620                                       const struct radv_sdma_surf *buf, const struct radv_sdma_surf *img_in,
621                                       const VkExtent3D base_extent, struct radeon_winsys_bo *temp_bo, bool to_image)
622 {
623    const struct radv_sdma_chunked_copy_info info = radv_sdma_get_chunked_copy_info(device, img_in, base_extent);
624    struct radv_sdma_surf img = *img_in;
625    struct radv_sdma_surf tmp = {
626       .va = temp_bo->va,
627       .bpp = img.bpp,
628       .blk_w = img.blk_w,
629       .blk_h = img.blk_h,
630       .pitch = info.aligned_row_pitch * img.blk_w,
631       .slice_pitch = info.aligned_row_pitch * img.blk_w * info.extent_vertical_blocks * img.blk_h,
632    };
633 
634    VkExtent3D extent = base_extent;
635    const unsigned buf_pitch_blocks = DIV_ROUND_UP(buf->pitch, img.blk_w);
636    const unsigned buf_slice_pitch_blocks = DIV_ROUND_UP(DIV_ROUND_UP(buf->slice_pitch, img.blk_w), img.blk_h);
637    assert(buf_pitch_blocks);
638    assert(buf_slice_pitch_blocks);
639    extent.depth = 1;
640 
641    for (unsigned slice = 0; slice < base_extent.depth; ++slice) {
642       for (unsigned row = 0; row < info.extent_vertical_blocks; row += info.num_rows_per_copy) {
643          const unsigned rows = MIN2(info.extent_vertical_blocks - row, info.num_rows_per_copy);
644 
645          img.offset.y = img_in->offset.y + row * img.blk_h;
646          img.offset.z = img_in->offset.z + slice;
647          extent.height = rows * img.blk_h;
648          tmp.slice_pitch = tmp.pitch * rows * img.blk_h;
649 
650          if (!to_image) {
651             /* Copy the rows from the source image to the temporary buffer. */
652             if (img.is_linear)
653                radv_sdma_emit_copy_linear_sub_window(device, cs, &img, &tmp, extent);
654             else
655                radv_sdma_emit_copy_tiled_sub_window(device, cs, &img, &tmp, extent, true);
656 
657             /* Wait for the copy to finish. */
658             radv_sdma_emit_nop(device, cs);
659          }
660 
661          /* buffer to image: copy each row from source buffer to temporary buffer.
662           * image to buffer: copy each row from temporary buffer to destination buffer.
663           */
664          for (unsigned r = 0; r < rows; ++r) {
665             const uint64_t buf_va =
666                buf->va + slice * buf_slice_pitch_blocks * img.bpp + (row + r) * buf_pitch_blocks * img.bpp;
667             const uint64_t tmp_va = tmp.va + r * info.aligned_row_pitch * img.bpp;
668             radv_sdma_copy_buffer(device, cs, to_image ? buf_va : tmp_va, to_image ? tmp_va : buf_va,
669                                   info.extent_horizontal_blocks * img.bpp);
670          }
671 
672          /* Wait for the copy to finish. */
673          radv_sdma_emit_nop(device, cs);
674 
675          if (to_image) {
676             /* Copy the rows from the temporary buffer to the destination image. */
677             if (img.is_linear)
678                radv_sdma_emit_copy_linear_sub_window(device, cs, &tmp, &img, extent);
679             else
680                radv_sdma_emit_copy_tiled_sub_window(device, cs, &img, &tmp, extent, false);
681 
682             /* Wait for the copy to finish. */
683             radv_sdma_emit_nop(device, cs);
684          }
685       }
686    }
687 }
688 
689 void
690 radv_sdma_copy_image(const struct radv_device *device, struct radeon_cmdbuf *cs, const struct radv_sdma_surf *src,
691                      const struct radv_sdma_surf *dst, const VkExtent3D extent)
692 {
693    if (src->is_linear) {
694       if (dst->is_linear) {
695          radv_sdma_emit_copy_linear_sub_window(device, cs, src, dst, extent);
696       } else {
697          radv_sdma_emit_copy_tiled_sub_window(device, cs, dst, src, extent, false);
698       }
699    } else {
700       if (dst->is_linear) {
701          radv_sdma_emit_copy_tiled_sub_window(device, cs, src, dst, extent, true);
702       } else {
703          radv_sdma_emit_copy_t2t_sub_window(device, cs, src, dst, extent);
704       }
705    }
706 }
707 
708 bool
709 radv_sdma_use_t2t_scanline_copy(const struct radv_device *device, const struct radv_sdma_surf *src,
710                                 const struct radv_sdma_surf *dst, const VkExtent3D extent)
711 {
712    /* These need a linear-to-linear / linear-to-tiled copy. */
713    if (src->is_linear || dst->is_linear)
714       return false;
715 
716    /* SDMA can't do format conversion. */
717    assert(src->bpp == dst->bpp);
718 
719    const struct radv_physical_device *pdev = radv_device_physical(device);
720    const enum sdma_version ver = pdev->info.sdma_ip_version;
721    if (ver < SDMA_5_0) {
722       /* SDMA v4.x and older doesn't support proper mip level selection. */
723       if (src->mip_levels > 1 || dst->mip_levels > 1)
724          return true;
725    }
726 
727    /* The two images can have a different block size,
728     * but must have the same swizzle mode.
729     */
730    if (src->micro_tile_mode != dst->micro_tile_mode)
731       return true;
732 
733    /* The T2T subwindow copy packet only has fields for one metadata configuration.
734     * It can either compress or decompress, or copy uncompressed images, but it
735     * can't copy from a compressed image to another.
736     */
737    if (src->meta_va && dst->meta_va)
738       return true;
739 
740    const bool needs_3d_alignment = src->is_3d && (src->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY ||
741                                                   src->micro_tile_mode == RADEON_MICRO_MODE_STANDARD);
742    const unsigned log2bpp = util_logbase2(src->bpp);
743    const VkExtent3D *const alignment =
744       needs_3d_alignment ? &radv_sdma_t2t_alignment_3d[log2bpp] : &radv_sdma_t2t_alignment_2d_and_planar[log2bpp];
745 
746    const VkExtent3D copy_extent_blk = radv_sdma_pixel_extent_to_blocks(extent, src->blk_w, src->blk_h);
747    const VkOffset3D src_offset_blk = radv_sdma_pixel_offset_to_blocks(src->offset, src->blk_w, src->blk_h);
748    const VkOffset3D dst_offset_blk = radv_sdma_pixel_offset_to_blocks(dst->offset, dst->blk_w, dst->blk_h);
749 
750    if (!util_is_aligned(copy_extent_blk.width, alignment->width) ||
751        !util_is_aligned(copy_extent_blk.height, alignment->height) ||
752        !util_is_aligned(copy_extent_blk.depth, alignment->depth))
753       return true;
754 
755    if (!util_is_aligned(src_offset_blk.x, alignment->width) || !util_is_aligned(src_offset_blk.y, alignment->height) ||
756        !util_is_aligned(src_offset_blk.z, alignment->depth))
757       return true;
758 
759    if (!util_is_aligned(dst_offset_blk.x, alignment->width) || !util_is_aligned(dst_offset_blk.y, alignment->height) ||
760        !util_is_aligned(dst_offset_blk.z, alignment->depth))
761       return true;
762 
763    return false;
764 }
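/* Example: for a tiled 32-bit (4 bpp) 3D image with a standard micro-tile mode,
 * the required T2T alignment is {4, 4, 4} (radv_sdma_t2t_alignment_3d). A
 * 10x10x4 copy region fails the width/height alignment checks above, so the
 * copy goes through the scanline path (radv_sdma_copy_image_t2t_scanline)
 * instead of a single T2T sub-window packet.
 */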
765 
766 void
767 radv_sdma_copy_image_t2t_scanline(const struct radv_device *device, struct radeon_cmdbuf *cs,
768                                   const struct radv_sdma_surf *src, const struct radv_sdma_surf *dst,
769                                   const VkExtent3D extent, struct radeon_winsys_bo *temp_bo)
770 {
771    const struct radv_sdma_chunked_copy_info info = radv_sdma_get_chunked_copy_info(device, src, extent);
772    struct radv_sdma_surf t2l_src = *src;
773    struct radv_sdma_surf t2l_dst = {
774       .va = temp_bo->va,
775       .bpp = src->bpp,
776       .blk_w = src->blk_w,
777       .blk_h = src->blk_h,
778       .pitch = info.aligned_row_pitch * src->blk_w,
779    };
780    struct radv_sdma_surf l2t_dst = *dst;
781    struct radv_sdma_surf l2t_src = {
782       .va = temp_bo->va,
783       .bpp = dst->bpp,
784       .blk_w = dst->blk_w,
785       .blk_h = dst->blk_h,
786       .pitch = info.aligned_row_pitch * dst->blk_w,
787    };
788 
789    for (unsigned slice = 0; slice < extent.depth; ++slice) {
790       for (unsigned row = 0; row < info.extent_vertical_blocks; row += info.num_rows_per_copy) {
791          const unsigned rows = MIN2(info.extent_vertical_blocks - row, info.num_rows_per_copy);
792 
793          const VkExtent3D t2l_extent = {
794             .width = info.extent_horizontal_blocks * src->blk_w,
795             .height = rows * src->blk_h,
796             .depth = 1,
797          };
798 
799          t2l_src.offset.y = src->offset.y + row * src->blk_h;
800          t2l_src.offset.z = src->offset.z + slice;
801          t2l_dst.slice_pitch = t2l_dst.pitch * t2l_extent.height;
802 
803          radv_sdma_emit_copy_tiled_sub_window(device, cs, &t2l_src, &t2l_dst, t2l_extent, true);
804          radv_sdma_emit_nop(device, cs);
805 
806          const VkExtent3D l2t_extent = {
807             .width = info.extent_horizontal_blocks * dst->blk_w,
808             .height = rows * dst->blk_h,
809             .depth = 1,
810          };
811 
812          l2t_dst.offset.y = dst->offset.y + row * dst->blk_h;
813          l2t_dst.offset.z = dst->offset.z + slice;
814          l2t_src.slice_pitch = l2t_src.pitch * l2t_extent.height;
815 
816          radv_sdma_emit_copy_tiled_sub_window(device, cs, &l2t_dst, &l2t_src, l2t_extent, false);
817          radv_sdma_emit_nop(device, cs);
818       }
819    }
820 }
821