/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include "si_pipe.h"
#include "util/format/u_format.h"
#include "util/format_srgb.h"
#include "util/u_helpers.h"
#include "util/hash_table.h"

static bool si_can_use_compute_blit(struct si_context *sctx, enum pipe_format format,
                                    unsigned num_samples, bool is_store, bool has_dcc)
{
   /* TODO: This format fails AMD_TEST=imagecopy. */
   if (format == PIPE_FORMAT_A8R8_UNORM && is_store)
      return false;

   /* MSAA image stores are broken. AMD_DEBUG=nofmask fixes them, implying that the FMASK
    * expand pass doesn't work, but let's use the gfx blit, which should be faster because
    * it doesn't require expanding the FMASK.
    *
    * TODO: Broken MSAA stores can cause app issues, though this issue might only affect
    *       internal blits, not sure.
    *
    * EQAA image stores are also unimplemented, which should be rejected here after MSAA
    * image stores are fixed.
    */
   if (num_samples > 1 && is_store)
      return false;

   if (util_format_is_depth_or_stencil(format))
      return false;

   /* Image stores support DCC since GFX10. */
   if (has_dcc && is_store && sctx->gfx_level < GFX10)
      return false;

   return true;
}

static void si_use_compute_copy_for_float_formats(struct si_context *sctx,
                                                  struct pipe_resource *texture,
                                                  unsigned level)
{
   struct si_texture *tex = (struct si_texture *)texture;

   /* If we are uploading into FP16 or R11G11B10_FLOAT via a blit, CB clobbers NaNs,
    * so in order to preserve them exactly, we have to use the compute blit.
    * The compute blit is used only when the destination doesn't have DCC, so
    * disable it here, which is kind of a hack.
    * If we are uploading into 32-bit floats with DCC via a blit, NaNs will also get
    * lost, so we need to disable DCC as well.
    *
    * This makes KHR-GL45.texture_view.view_classes pass on gfx9.
    */
   if (vi_dcc_enabled(tex, level) &&
       util_format_is_float(texture->format) &&
       /* Check if disabling DCC enables the compute copy. */
       !si_can_use_compute_blit(sctx, texture->format, texture->nr_samples, true, true) &&
       si_can_use_compute_blit(sctx, texture->format, texture->nr_samples, true, false)) {
      si_texture_disable_dcc(sctx, tex);
   }
}

/* Determine the cache policy. */
static enum si_cache_policy get_cache_policy(struct si_context *sctx, enum si_coherency coher,
                                             uint64_t size)
{
   if ((sctx->gfx_level >= GFX9 && (coher == SI_COHERENCY_CB_META ||
                                    coher == SI_COHERENCY_DB_META ||
                                    coher == SI_COHERENCY_CP)) ||
       (sctx->gfx_level >= GFX7 && coher == SI_COHERENCY_SHADER))
      return L2_LRU; /* it's faster if L2 doesn't evict anything */

   return L2_BYPASS;
}

unsigned si_get_flush_flags(struct si_context *sctx, enum si_coherency coher,
                            enum si_cache_policy cache_policy)
{
   switch (coher) {
   default:
   case SI_COHERENCY_NONE:
   case SI_COHERENCY_CP:
      return 0;
   case SI_COHERENCY_SHADER:
      return SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
             (cache_policy == L2_BYPASS ? SI_CONTEXT_INV_L2 : 0);
   case SI_COHERENCY_CB_META:
      return SI_CONTEXT_FLUSH_AND_INV_CB;
   case SI_COHERENCY_DB_META:
      return SI_CONTEXT_FLUSH_AND_INV_DB;
   }
}

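/* A buffer is considered idle if the current (unflushed) CS doesn't reference it and the GPU
 * isn't using it for the given usage (read and/or write).
 */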
static bool si_is_buffer_idle(struct si_context *sctx, struct si_resource *buf,
                              unsigned usage)
{
   return !si_cs_is_buffer_referenced(sctx, buf->buf, usage) &&
          sctx->ws->buffer_wait(sctx->ws, buf->buf, 0, usage);
}

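/* Relax the requested sync flags for buffer operations: drop all pre-op syncs if both buffers
 * are idle, drop the CS sync if the buffers were never bound to compute shaders, and downgrade
 * the PS sync to a GE sync if they were never bound to fragment shaders.
 */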
static void si_improve_sync_flags(struct si_context *sctx, struct pipe_resource *dst,
                                  struct pipe_resource *src, unsigned *flags)
{
   if (dst->target != PIPE_BUFFER || (src && src->target != PIPE_BUFFER))
      return;

   if (si_is_buffer_idle(sctx, si_resource(dst), RADEON_USAGE_READWRITE) &&
       (!src || si_is_buffer_idle(sctx, si_resource(src), RADEON_USAGE_WRITE))) {
      /* Idle buffers don't have to sync. */
      *flags &= ~(SI_OP_SYNC_GE_BEFORE | SI_OP_SYNC_PS_BEFORE | SI_OP_SYNC_CS_BEFORE |
                  SI_OP_SYNC_CPDMA_BEFORE);
      return;
   }

   const unsigned cs_mask = SI_BIND_CONSTANT_BUFFER(PIPE_SHADER_COMPUTE) |
                            SI_BIND_SHADER_BUFFER(PIPE_SHADER_COMPUTE) |
                            SI_BIND_IMAGE_BUFFER(PIPE_SHADER_COMPUTE) |
                            SI_BIND_SAMPLER_BUFFER(PIPE_SHADER_COMPUTE);

   const unsigned ps_mask = SI_BIND_CONSTANT_BUFFER(PIPE_SHADER_FRAGMENT) |
                            SI_BIND_SHADER_BUFFER(PIPE_SHADER_FRAGMENT) |
                            SI_BIND_IMAGE_BUFFER(PIPE_SHADER_FRAGMENT) |
                            SI_BIND_SAMPLER_BUFFER(PIPE_SHADER_FRAGMENT);

   unsigned bind_history = si_resource(dst)->bind_history |
                           (src ? si_resource(src)->bind_history : 0);

   /* Clear SI_OP_SYNC_CS_BEFORE if the buffer has never been used with a CS. */
   if (*flags & SI_OP_SYNC_CS_BEFORE && !(bind_history & cs_mask))
      *flags &= ~SI_OP_SYNC_CS_BEFORE;

   /* Clear SI_OP_SYNC_PS_BEFORE if the buffer has never been used with a PS. */
   if (*flags & SI_OP_SYNC_PS_BEFORE && !(bind_history & ps_mask)) {
      *flags &= ~SI_OP_SYNC_PS_BEFORE;
      *flags |= SI_OP_SYNC_GE_BEFORE;
   }
}

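/* Dispatch a driver-internal compute shader: apply the pre-op syncs and cache invalidations
 * requested in "flags", switch the context into internal-dispatch mode (pipeline stats, render
 * condition, fbfetch, decompression skipping), launch the grid, restore the previous state,
 * and emit the post-op flushes if SI_OP_SYNC_AFTER is set.
 */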
static void si_launch_grid_internal(struct si_context *sctx, const struct pipe_grid_info *info,
                                    void *shader, unsigned flags)
{
   /* Wait for previous shaders to finish. */
   if (flags & SI_OP_SYNC_GE_BEFORE)
      sctx->flags |= SI_CONTEXT_VS_PARTIAL_FLUSH;

   if (flags & SI_OP_SYNC_PS_BEFORE)
      sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;

   if (flags & SI_OP_SYNC_CS_BEFORE)
      sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;

   /* Invalidate L0-L1 caches. */
   /* sL0 is never invalidated, because src resources don't use it. */
   if (!(flags & SI_OP_SKIP_CACHE_INV_BEFORE))
      sctx->flags |= SI_CONTEXT_INV_VCACHE;

   /* Set settings for driver-internal compute dispatches. */
   sctx->flags &= ~SI_CONTEXT_START_PIPELINE_STATS;
   if (sctx->num_hw_pipestat_streamout_queries)
      sctx->flags |= SI_CONTEXT_STOP_PIPELINE_STATS;

   if (sctx->flags)
      si_mark_atom_dirty(sctx, &sctx->atoms.s.cache_flush);

   if (!(flags & SI_OP_CS_RENDER_COND_ENABLE))
      sctx->render_cond_enabled = false;

   /* Force-disable fbfetch because there are unsolvable recursion problems. */
   si_force_disable_ps_colorbuf0_slot(sctx);

   /* Skip decompression to prevent infinite recursion. */
   sctx->blitter_running = true;

   /* Dispatch compute. */
   void *saved_cs = sctx->cs_shader_state.program;
   sctx->b.bind_compute_state(&sctx->b, shader);
   sctx->b.launch_grid(&sctx->b, info);
   sctx->b.bind_compute_state(&sctx->b, saved_cs);

   /* Restore default settings. */
   sctx->flags &= ~SI_CONTEXT_STOP_PIPELINE_STATS;
   if (sctx->num_hw_pipestat_streamout_queries)
      sctx->flags |= SI_CONTEXT_START_PIPELINE_STATS;

   sctx->render_cond_enabled = sctx->render_cond;
   sctx->blitter_running = false;

   /* We force-disabled fbfetch, so recompute the state. */
   si_update_ps_colorbuf0_slot(sctx);

   if (flags & SI_OP_SYNC_AFTER) {
      sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;

      if (flags & SI_OP_CS_IMAGE) {
         /* Make sure image stores are visible to CB, which doesn't use L2 on GFX6-8. */
         sctx->flags |= sctx->gfx_level <= GFX8 ? SI_CONTEXT_WB_L2 : 0;
         /* Make sure image stores are visible to all CUs. */
         sctx->flags |= SI_CONTEXT_INV_VCACHE;
         /* Make sure RBs see our DCC changes. */
         if (sctx->gfx_level >= GFX10 && sctx->screen->info.tcc_rb_non_coherent) {
            unsigned enabled_mask = sctx->images[PIPE_SHADER_COMPUTE].enabled_mask;
            while (enabled_mask) {
               int i = u_bit_scan(&enabled_mask);
               if (sctx->images[PIPE_SHADER_COMPUTE].views[i].access & SI_IMAGE_ACCESS_ALLOW_DCC_STORE) {
                  sctx->flags |= SI_CONTEXT_INV_L2;
                  break;
               }
            }
         }
      } else {
         /* Make sure buffer stores are visible to all CUs. */
         sctx->flags |= SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE | SI_CONTEXT_PFP_SYNC_ME;
      }
   }

   if (sctx->flags)
      si_mark_atom_dirty(sctx, &sctx->atoms.s.cache_flush);
}

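/* Dispatch an internal compute shader that accesses SSBOs: emit the requested pre-op cache
 * flushes, save the current compute SSBO bindings, bind "buffers", launch the grid, handle the
 * post-op L2 writeback or TC_L2_dirty tracking for the written buffers, and restore the saved
 * bindings.
 */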
void si_launch_grid_internal_ssbos(struct si_context *sctx, struct pipe_grid_info *info,
                                   void *shader, unsigned flags, enum si_coherency coher,
                                   unsigned num_buffers, const struct pipe_shader_buffer *buffers,
                                   unsigned writeable_bitmask)
{
   if (!(flags & SI_OP_SKIP_CACHE_INV_BEFORE)) {
      sctx->flags |= si_get_flush_flags(sctx, coher, SI_COMPUTE_DST_CACHE_POLICY);
      si_mark_atom_dirty(sctx, &sctx->atoms.s.cache_flush);
   }

   /* Save states. */
   struct pipe_shader_buffer saved_sb[3] = {};
   assert(num_buffers <= ARRAY_SIZE(saved_sb));
   si_get_shader_buffers(sctx, PIPE_SHADER_COMPUTE, 0, num_buffers, saved_sb);

   unsigned saved_writable_mask = 0;
   for (unsigned i = 0; i < num_buffers; i++) {
      if (sctx->const_and_shader_buffers[PIPE_SHADER_COMPUTE].writable_mask &
          (1u << si_get_shaderbuf_slot(i)))
         saved_writable_mask |= 1 << i;
   }

   /* Bind buffers and launch compute. */
   si_set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, num_buffers, buffers,
                         writeable_bitmask,
                         true /* don't update bind_history to prevent unnecessary syncs later */);
   si_launch_grid_internal(sctx, info, shader, flags);

   /* Do cache flushing at the end. */
   if (get_cache_policy(sctx, coher, 0) == L2_BYPASS) {
      if (flags & SI_OP_SYNC_AFTER) {
         sctx->flags |= SI_CONTEXT_WB_L2;
         si_mark_atom_dirty(sctx, &sctx->atoms.s.cache_flush);
      }
   } else {
      while (writeable_bitmask)
         si_resource(buffers[u_bit_scan(&writeable_bitmask)].buffer)->TC_L2_dirty = true;
   }

   /* Restore states. */
   sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, num_buffers, saved_sb,
                              saved_writable_mask);
   for (int i = 0; i < num_buffers; i++)
      pipe_resource_reference(&saved_sb[i].buffer, NULL);
}

/**
 * Clear a buffer using read-modify-write with a 32-bit write bitmask.
 * The clear value has 32 bits.
 */
void si_compute_clear_buffer_rmw(struct si_context *sctx, struct pipe_resource *dst,
                                 unsigned dst_offset, unsigned size,
                                 uint32_t clear_value, uint32_t writebitmask,
                                 unsigned flags, enum si_coherency coher)
{
   assert(dst_offset % 4 == 0);
   assert(size % 4 == 0);

   assert(dst->target != PIPE_BUFFER || dst_offset + size <= dst->width0);

   /* Use buffer_load_dwordx4 and buffer_store_dwordx4 per thread. */
   unsigned dwords_per_instruction = 4;
   unsigned block_size = 64; /* it's always 64x1x1 */
   unsigned dwords_per_wave = dwords_per_instruction * block_size;

   unsigned num_dwords = size / 4;
   unsigned num_instructions = DIV_ROUND_UP(num_dwords, dwords_per_instruction);

   struct pipe_grid_info info = {};
   info.block[0] = MIN2(block_size, num_instructions);
   info.block[1] = 1;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(num_dwords, dwords_per_wave);
   info.grid[1] = 1;
   info.grid[2] = 1;

   struct pipe_shader_buffer sb = {};
   sb.buffer = dst;
   sb.buffer_offset = dst_offset;
   sb.buffer_size = size;

   sctx->cs_user_data[0] = clear_value & writebitmask;
   sctx->cs_user_data[1] = ~writebitmask;
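   /* With this packing, the shader (see si_create_clear_buffer_rmw_cs) can apply the
    * read-modify-write as:
    *    dst = (dst & cs_user_data[1]) | cs_user_data[0]
    * i.e. bits outside the write bitmask keep their current value.
    */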

   if (!sctx->cs_clear_buffer_rmw)
      sctx->cs_clear_buffer_rmw = si_create_clear_buffer_rmw_cs(sctx);

   si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_clear_buffer_rmw, flags, coher,
                                 1, &sb, 0x1);
}

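/* Clear a buffer with a 12-byte (3-dword) clear pattern, which can't be expressed as a
 * power-of-two clear value; the grid is sized so that each thread handles one 12-byte element
 * taken from cs_user_data.
 */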
static void si_compute_clear_12bytes_buffer(struct si_context *sctx, struct pipe_resource *dst,
                                            unsigned dst_offset, unsigned size,
                                            const uint32_t *clear_value, unsigned flags,
                                            enum si_coherency coher)
{
   assert(dst_offset % 4 == 0);
   assert(size % 4 == 0);
   unsigned size_12 = DIV_ROUND_UP(size, 12);

   struct pipe_shader_buffer sb = {0};
   sb.buffer = dst;
   sb.buffer_offset = dst_offset;
   sb.buffer_size = size;

   memcpy(sctx->cs_user_data, clear_value, 12);

   struct pipe_grid_info info = {0};

   if (!sctx->cs_clear_12bytes_buffer)
      sctx->cs_clear_12bytes_buffer = si_clear_12bytes_buffer_shader(sctx);

   info.block[0] = 64;
   info.last_block[0] = size_12 % 64;
   info.block[1] = 1;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(size_12, 64);
   info.grid[1] = 1;
   info.grid[2] = 1;

   si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_clear_12bytes_buffer, flags, coher,
                                 1, &sb, 0x1);
}

static void si_compute_do_clear_or_copy(struct si_context *sctx, struct pipe_resource *dst,
                                        unsigned dst_offset, struct pipe_resource *src,
                                        unsigned src_offset, unsigned size,
                                        const uint32_t *clear_value, unsigned clear_value_size,
                                        unsigned flags, enum si_coherency coher)
{
   assert(src_offset % 4 == 0);
   assert(dst_offset % 4 == 0);
   assert(size % 4 == 0);

   assert(dst->target != PIPE_BUFFER || dst_offset + size <= dst->width0);
   assert(!src || src_offset + size <= src->width0);

   /* The memory accesses are coalesced, meaning that the 1st instruction writes
    * the 1st contiguous block of data for the whole wave, the 2nd instruction
    * writes the 2nd contiguous block of data, etc.
    */
   unsigned dwords_per_thread =
      src ? SI_COMPUTE_COPY_DW_PER_THREAD : SI_COMPUTE_CLEAR_DW_PER_THREAD;
   unsigned instructions_per_thread = MAX2(1, dwords_per_thread / 4);
   unsigned dwords_per_instruction = dwords_per_thread / instructions_per_thread;
   /* The shader declares the block size like this: */
   unsigned block_size = si_determine_wave_size(sctx->screen, NULL);
   unsigned dwords_per_wave = dwords_per_thread * block_size;

   unsigned num_dwords = size / 4;
   unsigned num_instructions = DIV_ROUND_UP(num_dwords, dwords_per_instruction);
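   /* Example (the exact numbers depend on the SI_COMPUTE_*_DW_PER_THREAD defines): with a wave
    * size of 64 and 4 dwords per thread, one workgroup covers 64 * 4 = 256 dwords (1 KiB), so a
    * 64 KiB clear launches 64 workgroups of one wave each.
    */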

   struct pipe_grid_info info = {};
   info.block[0] = MIN2(block_size, num_instructions);
   info.block[1] = 1;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(num_dwords, dwords_per_wave);
   info.grid[1] = 1;
   info.grid[2] = 1;

   struct pipe_shader_buffer sb[2] = {};
   sb[0].buffer = dst;
   sb[0].buffer_offset = dst_offset;
   sb[0].buffer_size = size;

   bool shader_dst_stream_policy = SI_COMPUTE_DST_CACHE_POLICY != L2_LRU;

   if (src) {
      sb[1].buffer = src;
      sb[1].buffer_offset = src_offset;
      sb[1].buffer_size = size;

      if (!sctx->cs_copy_buffer) {
         sctx->cs_copy_buffer = si_create_dma_compute_shader(
            sctx, SI_COMPUTE_COPY_DW_PER_THREAD, shader_dst_stream_policy, true);
      }

      si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_copy_buffer, flags, coher,
                                    2, sb, 0x1);
   } else {
      assert(clear_value_size >= 4 && clear_value_size <= 16 &&
             util_is_power_of_two_or_zero(clear_value_size));

      for (unsigned i = 0; i < 4; i++)
         sctx->cs_user_data[i] = clear_value[i % (clear_value_size / 4)];

      if (!sctx->cs_clear_buffer) {
         sctx->cs_clear_buffer = si_create_dma_compute_shader(
            sctx, SI_COMPUTE_CLEAR_DW_PER_THREAD, shader_dst_stream_policy, false);
      }

      si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_clear_buffer, flags, coher,
                                    1, sb, 0x1);
   }
}

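/* Clear a buffer, choosing between a compute shader and CP DMA based on the clear size,
 * alignment, and clear value size; 12-byte clear values go through
 * si_compute_clear_12bytes_buffer, and any unaligned tail (less than a dword) is written with
 * buffer_subdata.
 */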
void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
                     uint64_t offset, uint64_t size, uint32_t *clear_value,
                     uint32_t clear_value_size, unsigned flags,
                     enum si_coherency coher, enum si_clear_method method)
{
   if (!size)
      return;

   si_improve_sync_flags(sctx, dst, NULL, &flags);

   ASSERTED unsigned clear_alignment = MIN2(clear_value_size, 4);

   assert(clear_value_size != 3 && clear_value_size != 6); /* 12 is allowed. */
   assert(offset % clear_alignment == 0);
   assert(size % clear_alignment == 0);
   assert(size < (UINT_MAX & ~0xf)); /* TODO: test 64-bit sizes in all codepaths */

   uint32_t clamped;
   if (util_lower_clearsize_to_dword(clear_value, (int*)&clear_value_size, &clamped))
      clear_value = &clamped;

   if (clear_value_size == 12) {
      si_compute_clear_12bytes_buffer(sctx, dst, offset, size, clear_value, flags, coher);
      return;
   }

   uint64_t aligned_size = size & ~3ull;
   if (aligned_size >= 4) {
      uint64_t compute_min_size;

      if (sctx->gfx_level <= GFX8) {
         /* CP DMA clears are terribly slow with GTT on GFX6-8, which can always
          * happen due to BO evictions.
          */
         compute_min_size = 0;
      } else {
         /* Use a small enough size because CP DMA is slower than compute with bigger sizes. */
         compute_min_size = 4 * 1024;
      }

      /* TODO: use compute for unaligned big sizes */
      if (method == SI_AUTO_SELECT_CLEAR_METHOD && (
           clear_value_size > 4 ||
           (clear_value_size == 4 && offset % 4 == 0 && size > compute_min_size))) {
         method = SI_COMPUTE_CLEAR_METHOD;
      }
      if (method == SI_COMPUTE_CLEAR_METHOD) {
         si_compute_do_clear_or_copy(sctx, dst, offset, NULL, 0, aligned_size, clear_value,
                                     clear_value_size, flags, coher);
      } else {
         assert(clear_value_size == 4);
         si_cp_dma_clear_buffer(sctx, &sctx->gfx_cs, dst, offset, aligned_size, *clear_value,
                                flags, coher, get_cache_policy(sctx, coher, size));
      }

      offset += aligned_size;
      size -= aligned_size;
   }

   /* Handle non-dword alignment. */
   if (size) {
      assert(dst);
      assert(dst->target == PIPE_BUFFER);
      assert(size < 4);

      sctx->b.buffer_subdata(&sctx->b, dst,
                             PIPE_MAP_WRITE |
                             /* TC forbids drivers from invalidating buffers and inferring
                              * unsynchronized mappings, so suppress those optimizations. */
                             (sctx->tc ? TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED |
                                         TC_TRANSFER_MAP_NO_INVALIDATE : 0),
                             offset, size, clear_value);
   }
}

static void si_pipe_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
                                 unsigned offset, unsigned size, const void *clear_value,
                                 int clear_value_size)
{
   si_clear_buffer((struct si_context *)ctx, dst, offset, size, (uint32_t *)clear_value,
                   clear_value_size, SI_OP_SYNC_BEFORE_AFTER, SI_COHERENCY_SHADER,
                   SI_AUTO_SELECT_CLEAR_METHOD);
}

void si_copy_buffer(struct si_context *sctx, struct pipe_resource *dst, struct pipe_resource *src,
                    uint64_t dst_offset, uint64_t src_offset, unsigned size, unsigned flags)
{
   if (!size)
      return;

   enum si_coherency coher = SI_COHERENCY_SHADER;
   enum si_cache_policy cache_policy = get_cache_policy(sctx, coher, size);
   uint64_t compute_min_size = 8 * 1024;

   si_improve_sync_flags(sctx, dst, src, &flags);

   /* Only use compute for VRAM copies on dGPUs. */
   /* TODO: use compute for unaligned big sizes */
   if (sctx->screen->info.has_dedicated_vram && si_resource(dst)->domains & RADEON_DOMAIN_VRAM &&
       si_resource(src)->domains & RADEON_DOMAIN_VRAM && size > compute_min_size &&
       dst_offset % 4 == 0 && src_offset % 4 == 0 && size % 4 == 0) {
      si_compute_do_clear_or_copy(sctx, dst, dst_offset, src, src_offset, size, NULL, 0,
                                  flags, coher);
   } else {
      si_cp_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset, size,
                            flags, coher, cache_policy);
   }
}

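/* Convert a tightly packed buffer of 8-bit values into 16-bit values, one element per thread,
 * e.g. to promote ubyte index buffers that the hardware can't fetch directly.
 */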
void si_compute_shorten_ubyte_buffer(struct si_context *sctx, struct pipe_resource *dst, struct pipe_resource *src,
                                     uint64_t dst_offset, uint64_t src_offset, unsigned size, unsigned flags)
{
   if (!size)
      return;

   if (!sctx->cs_ubyte_to_ushort)
      sctx->cs_ubyte_to_ushort = si_create_ubyte_to_ushort_compute_shader(sctx);

   /* Use COHERENCY_NONE to get SI_CONTEXT_WB_L2 automatically used in
    * si_launch_grid_internal_ssbos.
    */
   enum si_coherency coher = SI_COHERENCY_NONE;

   si_improve_sync_flags(sctx, dst, src, &flags);

   struct pipe_grid_info info = {};
   info.block[0] = si_determine_wave_size(sctx->screen, NULL);
   info.block[1] = 1;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(size, info.block[0]);
   info.grid[1] = 1;
   info.grid[2] = 1;
   info.last_block[0] = size % info.block[0];

   struct pipe_shader_buffer sb[2] = {};
   sb[0].buffer = dst;
   sb[0].buffer_offset = dst_offset;
   sb[0].buffer_size = dst->width0;

   sb[1].buffer = src;
   sb[1].buffer_offset = src_offset;
   sb[1].buffer_size = src->width0;

   si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_ubyte_to_ushort, flags, coher,
                                 2, sb, 0x1);
}

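/* Fill in the block/grid/last_block fields of "info" for a (work_x, work_y, work_z)-sized
 * dispatch and return how many dimensions are actually used (1, 2, or 3).
 */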
static unsigned
set_work_size(struct pipe_grid_info *info, unsigned block_x, unsigned block_y, unsigned block_z,
              unsigned work_x, unsigned work_y, unsigned work_z)
{
   info->block[0] = block_x;
   info->block[1] = block_y;
   info->block[2] = block_z;

   unsigned work[3] = {work_x, work_y, work_z};
   for (int i = 0; i < 3; ++i) {
      info->last_block[i] = work[i] % info->block[i];
      info->grid[i] = DIV_ROUND_UP(work[i], info->block[i]);
   }

   return work_z > 1 ? 3 : (work_y > 1 ? 2 : 1);
}

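/* Dispatch an internal compute shader that accesses images: validate and adjust the image views
 * (DCC stores, format fixups for image stores), save the current compute image bindings, bind
 * "images", decompress the subresources and make CB output coherent, launch with SI_OP_CS_IMAGE,
 * then restore the saved bindings.
 */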
static void si_launch_grid_internal_images(struct si_context *sctx,
                                           struct pipe_image_view *images,
                                           unsigned num_images,
                                           const struct pipe_grid_info *info,
                                           void *shader, unsigned flags)
{
   struct pipe_image_view saved_image[2] = {};
   assert(num_images <= ARRAY_SIZE(saved_image));

   for (unsigned i = 0; i < num_images; i++) {
      assert(sctx->b.screen->is_format_supported(sctx->b.screen, images[i].format,
                                                 images[i].resource->target,
                                                 images[i].resource->nr_samples,
                                                 images[i].resource->nr_storage_samples,
                                                 PIPE_BIND_SHADER_IMAGE));

      /* Always allow DCC stores on gfx10+. */
      if (sctx->gfx_level >= GFX10 &&
          images[i].access & PIPE_IMAGE_ACCESS_WRITE &&
          !(images[i].access & SI_IMAGE_ACCESS_DCC_OFF))
         images[i].access |= SI_IMAGE_ACCESS_ALLOW_DCC_STORE;

      /* Simplify the format according to what image stores support. */
      if (images[i].access & PIPE_IMAGE_ACCESS_WRITE) {
         images[i].format = util_format_linear(images[i].format); /* SRGB not supported */
         /* Keep L8A8 formats as-is because GFX7 is unable to store into R8A8 for some reason. */
         images[i].format = util_format_intensity_to_red(images[i].format);
         images[i].format = util_format_rgbx_to_rgba(images[i].format); /* prevent partial writes */
      }

      /* Save the image. */
      util_copy_image_view(&saved_image[i], &sctx->images[PIPE_SHADER_COMPUTE].views[i]);
   }

   /* This might invoke DCC decompression, so do it first. */
   sctx->b.set_shader_images(&sctx->b, PIPE_SHADER_COMPUTE, 0, num_images, 0, images);

   /* This should be done after set_shader_images. */
   for (unsigned i = 0; i < num_images; i++) {
      /* The driver doesn't decompress resources automatically here, so do it manually. */
      si_decompress_subresource(&sctx->b, images[i].resource, PIPE_MASK_RGBAZS,
                                images[i].u.tex.level, images[i].u.tex.first_layer,
                                images[i].u.tex.last_layer,
                                images[i].access & PIPE_IMAGE_ACCESS_WRITE);
   }

   /* This must be done before the compute shader. */
   for (unsigned i = 0; i < num_images; i++) {
      si_make_CB_shader_coherent(sctx, images[i].resource->nr_samples, true,
            ((struct si_texture*)images[i].resource)->surface.u.gfx9.color.dcc.pipe_aligned);
   }

   si_launch_grid_internal(sctx, info, shader, flags | SI_OP_CS_IMAGE);

   /* Restore images. */
   sctx->b.set_shader_images(&sctx->b, PIPE_SHADER_COMPUTE, 0, num_images, 0, saved_image);
   for (unsigned i = 0; i < num_images; i++)
      pipe_resource_reference(&saved_image[i].resource, NULL);
}

bool si_compute_copy_image(struct si_context *sctx, struct pipe_resource *dst, unsigned dst_level,
                           struct pipe_resource *src, unsigned src_level, unsigned dstx,
                           unsigned dsty, unsigned dstz, const struct pipe_box *src_box,
                           unsigned flags)
{
   struct si_texture *ssrc = (struct si_texture*)src;
   struct si_texture *sdst = (struct si_texture*)dst;

   si_use_compute_copy_for_float_formats(sctx, dst, dst_level);

   /* The compute copy is mandatory for compressed and subsampled formats because the gfx copy
    * doesn't support them. In all other cases, call si_can_use_compute_blit.
    *
    * The format is identical (we only need to check the src format) except for compressed
    * formats, which can be paired with an equivalent integer format.
    */
   if (!util_format_is_compressed(src->format) &&
       !util_format_is_compressed(dst->format) &&
       !util_format_is_subsampled_422(src->format)) {
      bool src_can_use_compute_blit =
         si_can_use_compute_blit(sctx, src->format, src->nr_samples, false,
                                 vi_dcc_enabled(ssrc, src_level));

      if (!src_can_use_compute_blit)
         return false;

      bool dst_can_use_compute_blit =
         si_can_use_compute_blit(sctx, dst->format, dst->nr_samples, true,
                                 vi_dcc_enabled(sdst, dst_level));

      if (!dst_can_use_compute_blit && !sctx->has_graphics &&
          si_can_use_compute_blit(sctx, dst->format, dst->nr_samples, false,
                                  vi_dcc_enabled(sdst, dst_level))) {
         /* Non-graphics contexts don't have a blitter, so try harder to do
          * a compute blit by disabling DCC on the destination texture.
          */
         dst_can_use_compute_blit = si_texture_disable_dcc(sctx, sdst);
      }

      if (!dst_can_use_compute_blit)
         return false;
   }

   enum pipe_format src_format = util_format_linear(src->format);
   enum pipe_format dst_format = util_format_linear(dst->format);
   bool is_linear = ssrc->surface.is_linear || sdst->surface.is_linear;

   assert(util_format_is_subsampled_422(src_format) == util_format_is_subsampled_422(dst_format));

   /* Interpret as integer values to avoid NaN issues */
   if (!vi_dcc_enabled(ssrc, src_level) &&
       !vi_dcc_enabled(sdst, dst_level) &&
       src_format == dst_format &&
       util_format_is_float(src_format) &&
       !util_format_is_compressed(src_format)) {
      switch(util_format_get_blocksizebits(src_format)) {
        case 16:
          src_format = dst_format = PIPE_FORMAT_R16_UINT;
          break;
        case 32:
          src_format = dst_format = PIPE_FORMAT_R32_UINT;
          break;
        case 64:
          src_format = dst_format = PIPE_FORMAT_R32G32_UINT;
          break;
        case 128:
          src_format = dst_format = PIPE_FORMAT_R32G32B32A32_UINT;
          break;
        default:
          assert(false);
      }
   }

   /* Interpret compressed formats as UINT. */
   struct pipe_box new_box;
   unsigned src_access = 0, dst_access = 0;

   /* Note that staging copies do compressed<->UINT, so one of the formats is already UINT. */
   if (util_format_is_compressed(src_format) || util_format_is_compressed(dst_format)) {
      if (util_format_is_compressed(src_format))
         src_access |= SI_IMAGE_ACCESS_BLOCK_FORMAT_AS_UINT;
      if (util_format_is_compressed(dst_format))
         dst_access |= SI_IMAGE_ACCESS_BLOCK_FORMAT_AS_UINT;

      dstx = util_format_get_nblocksx(dst_format, dstx);
      dsty = util_format_get_nblocksy(dst_format, dsty);

      new_box.x = util_format_get_nblocksx(src_format, src_box->x);
      new_box.y = util_format_get_nblocksy(src_format, src_box->y);
      new_box.z = src_box->z;
      new_box.width = util_format_get_nblocksx(src_format, src_box->width);
      new_box.height = util_format_get_nblocksy(src_format, src_box->height);
      new_box.depth = src_box->depth;
      src_box = &new_box;

      if (ssrc->surface.bpe == 8)
         src_format = dst_format = PIPE_FORMAT_R16G16B16A16_UINT; /* 64-bit block */
      else
         src_format = dst_format = PIPE_FORMAT_R32G32B32A32_UINT; /* 128-bit block */
   }

   if (util_format_is_subsampled_422(src_format)) {
      assert(src_format == dst_format);

      src_access |= SI_IMAGE_ACCESS_BLOCK_FORMAT_AS_UINT;
      dst_access |= SI_IMAGE_ACCESS_BLOCK_FORMAT_AS_UINT;

      dstx = util_format_get_nblocksx(src_format, dstx);

      src_format = dst_format = PIPE_FORMAT_R32_UINT;

      /* Interpreting a 422 subsampled format (16 bpp) as 32 bpp
       * should force us to divide src_box->x, dstx and width by 2.
       * But given that ac_surface allocates this format as 32 bpp
       * and that surf_size is then modified to pack the values,
       * we must keep the original values to get the correct results.
       */
   }

   /* SNORM blitting has precision issues. Use the SINT equivalent instead, which doesn't
    * force DCC decompression.
    */
   if (util_format_is_snorm(dst_format))
      src_format = dst_format = util_format_snorm_to_sint(dst_format);

   if (src_box->width == 0 || src_box->height == 0 || src_box->depth == 0)
      return true; /* success - nothing to do */

   struct pipe_image_view image[2] = {0};
   image[0].resource = src;
   image[0].shader_access = image[0].access = PIPE_IMAGE_ACCESS_READ | src_access;
   image[0].format = src_format;
   image[0].u.tex.level = src_level;
   image[0].u.tex.first_layer = 0;
   image[0].u.tex.last_layer = util_max_layer(src, src_level);
   image[1].resource = dst;
   image[1].shader_access = image[1].access = PIPE_IMAGE_ACCESS_WRITE | dst_access;
   image[1].format = dst_format;
   image[1].u.tex.level = dst_level;
   image[1].u.tex.first_layer = 0;
   image[1].u.tex.last_layer = util_max_layer(dst, dst_level);

   struct pipe_grid_info info = {0};

   bool dst_is_1d = dst->target == PIPE_TEXTURE_1D ||
                    dst->target == PIPE_TEXTURE_1D_ARRAY;
   bool src_is_1d = src->target == PIPE_TEXTURE_1D ||
                    src->target == PIPE_TEXTURE_1D_ARRAY;
   int block_x, block_y;
   int block_z = 1;

   /* Choose the block dimensions based on the copy area size. */
   if (src_box->height <= 4) {
      block_y = util_next_power_of_two(src_box->height);
      block_x = 64 / block_y;
   } else if (src_box->width <= 4) {
      block_x = util_next_power_of_two(src_box->width);
      block_y = 64 / block_x;
   } else if (is_linear) {
      block_x = 64;
      block_y = 1;
   } else {
      block_x = 8;
      block_y = 8;
   }

   sctx->cs_user_data[0] = src_box->x | (dstx << 16);
   sctx->cs_user_data[1] = src_box->y | (dsty << 16);
   sctx->cs_user_data[2] = src_box->z | (dstz << 16);

   unsigned wg_dim =
      set_work_size(&info, block_x, block_y, block_z,
                    src_box->width, src_box->height, src_box->depth);

   void **copy_image_cs_ptr = &sctx->cs_copy_image[wg_dim - 1][src_is_1d][dst_is_1d];
   if (!*copy_image_cs_ptr)
      *copy_image_cs_ptr = si_create_copy_image_cs(sctx, wg_dim, src_is_1d, dst_is_1d);

   assert(*copy_image_cs_ptr);

   si_launch_grid_internal_images(sctx, image, 2, &info, *copy_image_cs_ptr, flags);
   return true;
}

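/* Copy DCC metadata from its normal (non-displayable) layout at meta_offset to the displayable
 * DCC layout at display_dcc_offset using a compute shader, so that the display hardware can
 * consume it.
 */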
void si_retile_dcc(struct si_context *sctx, struct si_texture *tex)
{
   /* Set the DCC buffer. */
   assert(tex->surface.meta_offset && tex->surface.meta_offset <= UINT_MAX);
   assert(tex->surface.display_dcc_offset && tex->surface.display_dcc_offset <= UINT_MAX);
   assert(tex->surface.display_dcc_offset < tex->surface.meta_offset);
   assert(tex->buffer.bo_size <= UINT_MAX);

   struct pipe_shader_buffer sb = {};
   sb.buffer = &tex->buffer.b.b;
   sb.buffer_offset = tex->surface.display_dcc_offset;
   sb.buffer_size = tex->buffer.bo_size - sb.buffer_offset;

   sctx->cs_user_data[0] = tex->surface.meta_offset - tex->surface.display_dcc_offset;
   sctx->cs_user_data[1] = (tex->surface.u.gfx9.color.dcc_pitch_max + 1) |
                           (tex->surface.u.gfx9.color.dcc_height << 16);
   sctx->cs_user_data[2] = (tex->surface.u.gfx9.color.display_dcc_pitch_max + 1) |
                           (tex->surface.u.gfx9.color.display_dcc_height << 16);

   /* We have only 1 variant per bpp for now, so expect 32 bpp. */
   assert(tex->surface.bpe == 4);

   void **shader = &sctx->cs_dcc_retile[tex->surface.u.gfx9.swizzle_mode];
   if (!*shader)
      *shader = si_create_dcc_retile_cs(sctx, &tex->surface);

   /* Dispatch compute. */
   unsigned width = DIV_ROUND_UP(tex->buffer.b.b.width0, tex->surface.u.gfx9.color.dcc_block_width);
   unsigned height = DIV_ROUND_UP(tex->buffer.b.b.height0, tex->surface.u.gfx9.color.dcc_block_height);

   struct pipe_grid_info info = {};
   info.block[0] = 8;
   info.block[1] = 8;
   info.block[2] = 1;
   info.last_block[0] = width % info.block[0];
   info.last_block[1] = height % info.block[1];
   info.grid[0] = DIV_ROUND_UP(width, info.block[0]);
   info.grid[1] = DIV_ROUND_UP(height, info.block[1]);
   info.grid[2] = 1;

   si_launch_grid_internal_ssbos(sctx, &info, *shader, SI_OP_SYNC_BEFORE,
                                 SI_COHERENCY_CB_META, 1, &sb, 0x1);

   /* Don't flush caches. L2 will be flushed by the kernel fence. */
}

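/* Clear the DCC metadata of an MSAA color texture to the given value with a compute shader
 * (not used on GFX11, see the assert below).
 */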
void gfx9_clear_dcc_msaa(struct si_context *sctx, struct pipe_resource *res, uint32_t clear_value,
                         unsigned flags, enum si_coherency coher)
{
   struct si_texture *tex = (struct si_texture*)res;

   assert(sctx->gfx_level < GFX11);

   /* Set the DCC buffer. */
   assert(tex->surface.meta_offset && tex->surface.meta_offset <= UINT_MAX);
   assert(tex->buffer.bo_size <= UINT_MAX);

   struct pipe_shader_buffer sb = {};
   sb.buffer = &tex->buffer.b.b;
   sb.buffer_offset = tex->surface.meta_offset;
   sb.buffer_size = tex->buffer.bo_size - sb.buffer_offset;

   sctx->cs_user_data[0] = (tex->surface.u.gfx9.color.dcc_pitch_max + 1) |
                           (tex->surface.u.gfx9.color.dcc_height << 16);
   sctx->cs_user_data[1] = (clear_value & 0xffff) |
                           ((uint32_t)tex->surface.tile_swizzle << 16);

   /* These variables identify the shader variant. */
   unsigned swizzle_mode = tex->surface.u.gfx9.swizzle_mode;
   unsigned bpe_log2 = util_logbase2(tex->surface.bpe);
   unsigned log2_samples = util_logbase2(tex->buffer.b.b.nr_samples);
   bool fragments8 = tex->buffer.b.b.nr_storage_samples == 8;
   bool is_array = tex->buffer.b.b.array_size > 1;
   void **shader = &sctx->cs_clear_dcc_msaa[swizzle_mode][bpe_log2][fragments8][log2_samples - 2][is_array];

   if (!*shader)
      *shader = gfx9_create_clear_dcc_msaa_cs(sctx, tex);

   /* Dispatch compute. */
   unsigned width = DIV_ROUND_UP(tex->buffer.b.b.width0, tex->surface.u.gfx9.color.dcc_block_width);
   unsigned height = DIV_ROUND_UP(tex->buffer.b.b.height0, tex->surface.u.gfx9.color.dcc_block_height);
   unsigned depth = DIV_ROUND_UP(tex->buffer.b.b.array_size, tex->surface.u.gfx9.color.dcc_block_depth);

   struct pipe_grid_info info = {};
   info.block[0] = 8;
   info.block[1] = 8;
   info.block[2] = 1;
   info.last_block[0] = width % info.block[0];
   info.last_block[1] = height % info.block[1];
   info.grid[0] = DIV_ROUND_UP(width, info.block[0]);
   info.grid[1] = DIV_ROUND_UP(height, info.block[1]);
   info.grid[2] = depth;

   si_launch_grid_internal_ssbos(sctx, &info, *shader, flags, coher, 1, &sb, 0x1);
}

/* Expand FMASK to make it identity, so that image stores can ignore it. */
void si_compute_expand_fmask(struct pipe_context *ctx, struct pipe_resource *tex)
{
   struct si_context *sctx = (struct si_context *)ctx;
   bool is_array = tex->target == PIPE_TEXTURE_2D_ARRAY;
   unsigned log_fragments = util_logbase2(tex->nr_storage_samples);
   unsigned log_samples = util_logbase2(tex->nr_samples);
   assert(tex->nr_samples >= 2);

   assert(sctx->gfx_level < GFX11);

   /* EQAA FMASK expansion is unimplemented. */
   if (tex->nr_samples != tex->nr_storage_samples)
      return;

   si_make_CB_shader_coherent(sctx, tex->nr_samples, true,
                              ((struct si_texture*)tex)->surface.u.gfx9.color.dcc.pipe_aligned);

   /* Save states. */
   struct pipe_image_view saved_image = {0};
   util_copy_image_view(&saved_image, &sctx->images[PIPE_SHADER_COMPUTE].views[0]);

   /* Bind the image. */
   struct pipe_image_view image = {0};
   image.resource = tex;
   /* Don't set WRITE so as not to trigger FMASK expansion, causing
    * an infinite loop. */
   image.shader_access = image.access = PIPE_IMAGE_ACCESS_READ;
   image.format = util_format_linear(tex->format);
   if (is_array)
      image.u.tex.last_layer = tex->array_size - 1;

   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, 0, &image);

   /* Bind the shader. */
   void **shader = &sctx->cs_fmask_expand[log_samples - 1][is_array];
   if (!*shader)
      *shader = si_create_fmask_expand_cs(sctx, tex->nr_samples, is_array);

   /* Dispatch compute. */
   struct pipe_grid_info info = {0};
   info.block[0] = 8;
   info.last_block[0] = tex->width0 % 8;
   info.block[1] = 8;
   info.last_block[1] = tex->height0 % 8;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(tex->width0, 8);
   info.grid[1] = DIV_ROUND_UP(tex->height0, 8);
   info.grid[2] = is_array ? tex->array_size : 1;

   si_launch_grid_internal(sctx, &info, *shader, SI_OP_SYNC_BEFORE_AFTER);

   /* Restore previous states. */
   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, 0, &saved_image);
   pipe_resource_reference(&saved_image.resource, NULL);

   /* Array of fully expanded FMASK values, arranged by [log2(fragments)][log2(samples)-1]. */
#define INVALID 0 /* never used */
   static const uint64_t fmask_expand_values[][4] = {
      /* samples */
      /* 2 (8 bpp) 4 (8 bpp)   8 (8-32bpp) 16 (16-64bpp)      fragments */
      {0x02020202, 0x0E0E0E0E, 0xFEFEFEFE, 0xFFFEFFFE},      /* 1 */
      {0x02020202, 0xA4A4A4A4, 0xAAA4AAA4, 0xAAAAAAA4},      /* 2 */
      {INVALID, 0xE4E4E4E4, 0x44443210, 0x4444444444443210}, /* 4 */
      {INVALID, INVALID, 0x76543210, 0x8888888876543210},    /* 8 */
   };

   /* Clear FMASK to identity. */
   struct si_texture *stex = (struct si_texture *)tex;
   si_clear_buffer(sctx, tex, stex->surface.fmask_offset, stex->surface.fmask_size,
                   (uint32_t *)&fmask_expand_values[log_fragments][log_samples - 1],
                   log_fragments >= 2 && log_samples == 4 ? 8 : 4, SI_OP_SYNC_AFTER,
                   SI_COHERENCY_SHADER, SI_AUTO_SELECT_CLEAR_METHOD);
}

void si_init_compute_blit_functions(struct si_context *sctx)
{
   sctx->b.clear_buffer = si_pipe_clear_buffer;
}

/* Clear a region of a color surface to a constant value. */
void si_compute_clear_render_target(struct pipe_context *ctx, struct pipe_surface *dstsurf,
                                    const union pipe_color_union *color, unsigned dstx,
                                    unsigned dsty, unsigned width, unsigned height,
                                    bool render_condition_enabled)
{
   struct si_context *sctx = (struct si_context *)ctx;
   unsigned num_layers = dstsurf->u.tex.last_layer - dstsurf->u.tex.first_layer + 1;
   unsigned data[4 + sizeof(color->ui)] = {dstx, dsty, dstsurf->u.tex.first_layer, 0};

   if (width == 0 || height == 0)
      return;

   if (util_format_is_srgb(dstsurf->format)) {
      union pipe_color_union color_srgb;
      for (int i = 0; i < 3; i++)
         color_srgb.f[i] = util_format_linear_to_srgb_float(color->f[i]);
      color_srgb.f[3] = color->f[3];
      memcpy(data + 4, color_srgb.ui, sizeof(color->ui));
   } else {
      memcpy(data + 4, color->ui, sizeof(color->ui));
   }

   struct pipe_constant_buffer saved_cb = {};
   si_get_pipe_constant_buffer(sctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);

   struct pipe_constant_buffer cb = {};
   cb.buffer_size = sizeof(data);
   cb.user_buffer = data;
   ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, false, &cb);

   struct pipe_image_view image = {0};
   image.resource = dstsurf->texture;
   image.shader_access = image.access = PIPE_IMAGE_ACCESS_WRITE;
   image.format = util_format_linear(dstsurf->format);
   image.u.tex.level = dstsurf->u.tex.level;
   image.u.tex.first_layer = 0; /* 3D images ignore first_layer (BASE_ARRAY) */
   image.u.tex.last_layer = dstsurf->u.tex.last_layer;

   struct pipe_grid_info info = {0};
   void *shader;

   if (dstsurf->texture->target != PIPE_TEXTURE_1D_ARRAY) {
      if (!sctx->cs_clear_render_target)
         sctx->cs_clear_render_target = si_clear_render_target_shader(sctx, PIPE_TEXTURE_2D_ARRAY);
      shader = sctx->cs_clear_render_target;

      info.block[0] = 8;
      info.last_block[0] = width % 8;
      info.block[1] = 8;
      info.last_block[1] = height % 8;
      info.block[2] = 1;
      info.grid[0] = DIV_ROUND_UP(width, 8);
      info.grid[1] = DIV_ROUND_UP(height, 8);
      info.grid[2] = num_layers;
   } else {
      if (!sctx->cs_clear_render_target_1d_array)
         sctx->cs_clear_render_target_1d_array = si_clear_render_target_shader(sctx, PIPE_TEXTURE_1D_ARRAY);
      shader = sctx->cs_clear_render_target_1d_array;

      info.block[0] = 64;
      info.last_block[0] = width % 64;
      info.block[1] = 1;
      info.block[2] = 1;
      info.grid[0] = DIV_ROUND_UP(width, 64);
      info.grid[1] = num_layers;
      info.grid[2] = 1;
   }

   si_launch_grid_internal_images(sctx, &image, 1, &info, shader,
                                  SI_OP_SYNC_BEFORE_AFTER |
                                  (render_condition_enabled ? SI_OP_CS_RENDER_COND_ENABLE : 0));

   ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, true, &saved_cb);
}

/* Return the last component that a compute blit should load and store. */
static unsigned si_format_get_last_blit_component(enum pipe_format format, bool is_dst)
{
   const struct util_format_description *desc = util_format_description(format);
   unsigned num = 0;

   for (unsigned i = 1; i < 4; i++) {
      if (desc->swizzle[i] <= PIPE_SWIZZLE_W ||
          /* If the swizzle is 1 for dst, we need to store 1 explicitly.
           * The hardware stores 0 by default. */
          (is_dst && desc->swizzle[i] == PIPE_SWIZZLE_1))
         num = i;
   }
   return num;
}

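/* Return true if the source box reaches outside the source mip level, in which case the blit
 * shader must clamp the sampled coordinates to the edge (see the xy_clamp_to_edge option).
 */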
static bool si_should_blit_clamp_xy(const struct pipe_blit_info *info)
{
   int src_width = u_minify(info->src.resource->width0, info->src.level);
   int src_height = u_minify(info->src.resource->height0, info->src.level);
   struct pipe_box box = info->src.box;

   /* Eliminate negative width/height/depth. */
   if (box.width < 0) {
      box.x += box.width;
      box.width *= -1;
   }
   if (box.height < 0) {
      box.y += box.height;
      box.height *= -1;
   }

   bool in_bounds = box.x >= 0 && box.x < src_width &&
                    box.y >= 0 && box.y < src_height &&
                    box.x + box.width > 0 && box.x + box.width <= src_width &&
                    box.y + box.height > 0 && box.y + box.height <= src_height;

   /* Return true if the box is not in bounds. */
   return !in_bounds;
}

bool si_compute_blit(struct si_context *sctx, const struct pipe_blit_info *info, bool testing)
{
   /* Compute blits require D16 right now (see the ISA).
    *
    * Testing on Navi21 showed that the compute blit is slightly slower than the gfx blit.
    * The compute blit is even slower with DCC stores. VP13 CATIA_plane_pencil is a good test
    * for that because it's mostly just blits.
    *
    * TODO: benchmark the performance on gfx11
    */
   if (sctx->gfx_level < GFX11 && !testing)
      return false;

   if (!si_can_use_compute_blit(sctx, info->dst.format, info->dst.resource->nr_samples, true,
                                vi_dcc_enabled((struct si_texture*)info->dst.resource,
                                               info->dst.level)) ||
       !si_can_use_compute_blit(sctx, info->src.format, info->src.resource->nr_samples, false,
                                vi_dcc_enabled((struct si_texture*)info->src.resource,
                                               info->src.level)))
      return false;

   if (info->alpha_blend ||
       info->num_window_rectangles ||
       info->scissor_enable ||
       /* No scaling. */
       info->dst.box.width != abs(info->src.box.width) ||
       info->dst.box.height != abs(info->src.box.height) ||
       info->dst.box.depth != abs(info->src.box.depth))
      return false;

   assert(info->src.box.depth >= 0);

   /* Shader images. */
   struct pipe_image_view image[2];
   image[0].resource = info->src.resource;
   image[0].shader_access = image[0].access = PIPE_IMAGE_ACCESS_READ;
   image[0].format = info->src.format;
   image[0].u.tex.level = info->src.level;
   image[0].u.tex.first_layer = 0;
   image[0].u.tex.last_layer = util_max_layer(info->src.resource, info->src.level);

   image[1].resource = info->dst.resource;
   image[1].shader_access = image[1].access = PIPE_IMAGE_ACCESS_WRITE;
   image[1].format = info->dst.format;
   image[1].u.tex.level = info->dst.level;
   image[1].u.tex.first_layer = 0;
   image[1].u.tex.last_layer = util_max_layer(info->dst.resource, info->dst.level);

   struct pipe_grid_info grid = {0};
   unsigned wg_dim =
      set_work_size(&grid, 8, 8, 1, info->dst.box.width, info->dst.box.height,
                    info->dst.box.depth);

   /* Get the shader key. */
   const struct util_format_description *dst_desc = util_format_description(info->dst.format);
   unsigned i = util_format_get_first_non_void_channel(info->dst.format);
   union si_compute_blit_shader_key options;
   options.key = 0;

   options.always_true = true;
   options.wg_dim = wg_dim;
   options.src_is_1d = info->src.resource->target == PIPE_TEXTURE_1D ||
                       info->src.resource->target == PIPE_TEXTURE_1D_ARRAY;
   options.dst_is_1d = info->dst.resource->target == PIPE_TEXTURE_1D ||
                       info->dst.resource->target == PIPE_TEXTURE_1D_ARRAY;
   options.src_is_msaa = info->src.resource->nr_samples > 1;
   options.dst_is_msaa = info->dst.resource->nr_samples > 1;
   /* Resolving integer formats only copies sample 0. log2_samples is then unused. */
   options.sample0_only = options.src_is_msaa && !options.dst_is_msaa &&
                          util_format_is_pure_integer(info->src.format);
   unsigned num_samples = MAX2(info->src.resource->nr_samples, info->dst.resource->nr_samples);
   options.log2_samples = options.sample0_only ? 0 : util_logbase2(num_samples);
   options.xy_clamp_to_edge = si_should_blit_clamp_xy(info);
   options.flip_x = info->src.box.width < 0;
   options.flip_y = info->src.box.height < 0;
   options.sint_to_uint = util_format_is_pure_sint(info->src.format) &&
                          util_format_is_pure_uint(info->dst.format);
   options.uint_to_sint = util_format_is_pure_uint(info->src.format) &&
                          util_format_is_pure_sint(info->dst.format);
   options.dst_is_srgb = util_format_is_srgb(info->dst.format);
   options.last_dst_channel = si_format_get_last_blit_component(info->dst.format, true);
   options.last_src_channel = MIN2(si_format_get_last_blit_component(info->src.format, false),
                                   options.last_dst_channel);
   options.use_integer_one = util_format_is_pure_integer(info->dst.format) &&
                             options.last_src_channel < options.last_dst_channel &&
                             options.last_dst_channel == 3;

   /* WARNING: We need this option for AMD_TEST to get results identical with the gfx blit,
    * otherwise we wouldn't be able to fully validate whether everything else works.
    * The test expects that the behavior is identical to u_blitter.
    *
    * Additionally, we need to keep this enabled even when not testing because not doing fp16_rtz
    * breaks "piglit/bin/texsubimage -auto pbo".
    */
   options.fp16_rtz = !util_format_is_pure_integer(info->dst.format) &&
                      dst_desc->channel[i].size <= 10;

   struct hash_entry *entry = _mesa_hash_table_search(sctx->cs_blit_shaders,
                                                      (void*)(uintptr_t)options.key);
   void *shader = entry ? entry->data : NULL;
   if (!shader) {
      shader = si_create_blit_cs(sctx, &options);
      _mesa_hash_table_insert(sctx->cs_blit_shaders,
                              (void*)(uintptr_t)options.key, shader);
   }

   sctx->cs_user_data[0] = (info->src.box.x & 0xffff) | ((info->dst.box.x & 0xffff) << 16);
   sctx->cs_user_data[1] = (info->src.box.y & 0xffff) | ((info->dst.box.y & 0xffff) << 16);
   sctx->cs_user_data[2] = (info->src.box.z & 0xffff) | ((info->dst.box.z & 0xffff) << 16);

   si_launch_grid_internal_images(sctx, image, 2, &grid, shader,
                                  SI_OP_SYNC_BEFORE_AFTER |
                                  (info->render_condition_enable ? SI_OP_CS_RENDER_COND_ENABLE : 0));
   return true;
}