/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "si_pipe.h"
#include "util/format/u_format.h"
#include "util/format_srgb.h"
#include "util/u_helpers.h"

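/* Return whether the compute blit path can handle the given format and sample count.
 * is_store is true when the image is the blit destination (written by the shader);
 * has_dcc is whether the accessed mip level has DCC enabled.
 */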
static bool si_can_use_compute_blit(struct si_context *sctx, enum pipe_format format,
                                    unsigned num_samples, bool is_store, bool has_dcc)
{
   /* TODO: This format fails AMD_TEST=imagecopy. */
   if (format == PIPE_FORMAT_A8R8_UNORM && is_store)
      return false;

   if (num_samples > 1)
      return false;

   if (util_format_is_depth_or_stencil(format))
      return false;

   /* Image stores support DCC since GFX10. */
   if (has_dcc && is_store && sctx->gfx_level < GFX10)
      return false;

   return true;
}

static void si_use_compute_copy_for_float_formats(struct si_context *sctx,
                                                  struct pipe_resource *texture,
                                                  unsigned level)
{
   struct si_texture *tex = (struct si_texture *)texture;

   /* If we are uploading into FP16 or R11G11B10_FLOAT via a blit, CB clobbers NaNs,
    * so in order to preserve them exactly, we have to use the compute blit.
    * The compute blit is used only when the destination doesn't have DCC, so
    * disable it here, which is kinda a hack.
    * If we are uploading into 32-bit floats with DCC via a blit, NaNs will also get
    * lost so we need to disable DCC as well.
    *
    * This makes KHR-GL45.texture_view.view_classes pass on gfx9.
    */
   if (vi_dcc_enabled(tex, level) &&
       util_format_is_float(texture->format) &&
       /* Check if disabling DCC enables the compute copy. */
       !si_can_use_compute_blit(sctx, texture->format, texture->nr_samples, true, true) &&
       si_can_use_compute_blit(sctx, texture->format, texture->nr_samples, true, false)) {
      si_texture_disable_dcc(sctx, tex);
   }
}

/* Determine the cache policy. */
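/* L2_LRU keeps the result resident in L2 (faster when nothing has to be evicted);
 * L2_BYPASS means the consumer doesn't read the data through L2, so L2 has to be
 * written back afterwards (see si_launch_grid_internal_ssbos).
 */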
static enum si_cache_policy get_cache_policy(struct si_context *sctx, enum si_coherency coher,
                                             uint64_t size)
{
   if ((sctx->gfx_level >= GFX9 && (coher == SI_COHERENCY_CB_META ||
                                    coher == SI_COHERENCY_DB_META ||
                                    coher == SI_COHERENCY_CP)) ||
       (sctx->gfx_level >= GFX7 && coher == SI_COHERENCY_SHADER))
      return L2_LRU; /* it's faster if L2 doesn't evict anything */

   return L2_BYPASS;
}

unsigned si_get_flush_flags(struct si_context *sctx, enum si_coherency coher,
                            enum si_cache_policy cache_policy)
{
   switch (coher) {
   default:
   case SI_COHERENCY_NONE:
   case SI_COHERENCY_CP:
      return 0;
   case SI_COHERENCY_SHADER:
      return SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
             (cache_policy == L2_BYPASS ? SI_CONTEXT_INV_L2 : 0);
   case SI_COHERENCY_CB_META:
      return SI_CONTEXT_FLUSH_AND_INV_CB;
   case SI_COHERENCY_DB_META:
      return SI_CONTEXT_FLUSH_AND_INV_DB;
   }
}

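/* A buffer is idle when it isn't referenced by the current command stream and the GPU
 * isn't using it either (buffer_wait with a zero timeout only polls, it never blocks).
 */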
static bool si_is_buffer_idle(struct si_context *sctx, struct si_resource *buf,
                              unsigned usage)
{
   return !si_cs_is_buffer_referenced(sctx, buf->buf, usage) &&
          sctx->ws->buffer_wait(sctx->ws, buf->buf, 0, usage);
}

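/* Drop pre-operation sync flags that aren't needed: idle buffers don't need any sync,
 * a buffer never bound to a compute shader doesn't need a CS sync, and a buffer never
 * bound to a fragment shader only needs a GE sync instead of a full PS sync.
 */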
static void si_improve_sync_flags(struct si_context *sctx, struct pipe_resource *dst,
                                  struct pipe_resource *src, unsigned *flags)
{
   if (dst->target != PIPE_BUFFER || (src && src->target != PIPE_BUFFER))
      return;

   if (si_is_buffer_idle(sctx, si_resource(dst), RADEON_USAGE_READWRITE) &&
       (!src || si_is_buffer_idle(sctx, si_resource(src), RADEON_USAGE_WRITE))) {
      /* Idle buffers don't have to sync. */
      *flags &= ~(SI_OP_SYNC_GE_BEFORE | SI_OP_SYNC_PS_BEFORE | SI_OP_SYNC_CS_BEFORE |
                  SI_OP_SYNC_CPDMA_BEFORE);
      return;
   }

   const unsigned cs_mask = SI_BIND_CONSTANT_BUFFER(PIPE_SHADER_COMPUTE) |
                            SI_BIND_SHADER_BUFFER(PIPE_SHADER_COMPUTE) |
                            SI_BIND_IMAGE_BUFFER(PIPE_SHADER_COMPUTE) |
                            SI_BIND_SAMPLER_BUFFER(PIPE_SHADER_COMPUTE);

   const unsigned ps_mask = SI_BIND_CONSTANT_BUFFER(PIPE_SHADER_FRAGMENT) |
                            SI_BIND_SHADER_BUFFER(PIPE_SHADER_FRAGMENT) |
                            SI_BIND_IMAGE_BUFFER(PIPE_SHADER_FRAGMENT) |
                            SI_BIND_SAMPLER_BUFFER(PIPE_SHADER_FRAGMENT);

   unsigned bind_history = si_resource(dst)->bind_history |
                           (src ? si_resource(src)->bind_history : 0);

   /* Clear SI_OP_SYNC_CS_BEFORE if the buffer has never been used with a CS. */
   if (*flags & SI_OP_SYNC_CS_BEFORE && !(bind_history & cs_mask))
      *flags &= ~SI_OP_SYNC_CS_BEFORE;

   /* Clear SI_OP_SYNC_PS_BEFORE if the buffer has never been used with a PS. */
   if (*flags & SI_OP_SYNC_PS_BEFORE && !(bind_history & ps_mask)) {
      *flags &= ~SI_OP_SYNC_PS_BEFORE;
      *flags |= SI_OP_SYNC_GE_BEFORE;
   }
}

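/* Dispatch a driver-internal compute shader: apply the requested pre-op syncs and cache
 * invalidations, disable pipeline statistics and (unless requested) render conditions,
 * launch the grid with the given shader, and apply the post-op flushes.
 */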
static void si_launch_grid_internal(struct si_context *sctx, const struct pipe_grid_info *info,
                                    void *shader, unsigned flags)
{
   /* Wait for previous shaders to finish. */
   if (flags & SI_OP_SYNC_GE_BEFORE)
      sctx->flags |= SI_CONTEXT_VS_PARTIAL_FLUSH;

   if (flags & SI_OP_SYNC_PS_BEFORE)
      sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;

   if (flags & SI_OP_SYNC_CS_BEFORE)
      sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;

   if (!(flags & SI_OP_CS_IMAGE))
      sctx->flags |= SI_CONTEXT_PFP_SYNC_ME;

   /* Invalidate L0-L1 caches. */
   /* sL0 is never invalidated, because src resources don't use it. */
   if (!(flags & SI_OP_SKIP_CACHE_INV_BEFORE))
      sctx->flags |= SI_CONTEXT_INV_VCACHE;

   /* Set settings for driver-internal compute dispatches. */
   sctx->flags &= ~SI_CONTEXT_START_PIPELINE_STATS;
   sctx->flags |= SI_CONTEXT_STOP_PIPELINE_STATS;

   if (!(flags & SI_OP_CS_RENDER_COND_ENABLE))
      sctx->render_cond_enabled = false;

   /* Skip decompression to prevent infinite recursion. */
   sctx->blitter_running = true;

   /* Dispatch compute. */
   void *saved_cs = sctx->cs_shader_state.program;
   sctx->b.bind_compute_state(&sctx->b, shader);
   sctx->b.launch_grid(&sctx->b, info);
   sctx->b.bind_compute_state(&sctx->b, saved_cs);

   /* Restore default settings. */
   sctx->flags &= ~SI_CONTEXT_STOP_PIPELINE_STATS;
   sctx->flags |= SI_CONTEXT_START_PIPELINE_STATS;
   sctx->render_cond_enabled = sctx->render_cond;
   sctx->blitter_running = false;

   if (flags & SI_OP_SYNC_AFTER) {
      sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;

      if (flags & SI_OP_CS_IMAGE) {
         /* Make sure image stores are visible to CB, which doesn't use L2 on GFX6-8. */
         sctx->flags |= sctx->gfx_level <= GFX8 ? SI_CONTEXT_WB_L2 : 0;
         /* Make sure image stores are visible to all CUs. */
         sctx->flags |= SI_CONTEXT_INV_VCACHE;
         /* Make sure RBs see our DCC changes. */
         if (sctx->gfx_level >= GFX10 && sctx->screen->info.tcc_rb_non_coherent) {
            unsigned enabled_mask = sctx->images[PIPE_SHADER_COMPUTE].enabled_mask;
            while (enabled_mask) {
               int i = u_bit_scan(&enabled_mask);
               if (sctx->images[PIPE_SHADER_COMPUTE].views[i].access & SI_IMAGE_ACCESS_ALLOW_DCC_STORE) {
                  sctx->flags |= SI_CONTEXT_INV_L2;
                  break;
               }
            }
         }
      } else {
         /* Make sure buffer stores are visible to all CUs. */
         sctx->flags |= SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE | SI_CONTEXT_PFP_SYNC_ME;
      }
   }
}

void si_launch_grid_internal_ssbos(struct si_context *sctx, struct pipe_grid_info *info,
                                   void *shader, unsigned flags, enum si_coherency coher,
                                   unsigned num_buffers, const struct pipe_shader_buffer *buffers,
                                   unsigned writeable_bitmask)
{
   if (!(flags & SI_OP_SKIP_CACHE_INV_BEFORE))
      sctx->flags |= si_get_flush_flags(sctx, coher, SI_COMPUTE_DST_CACHE_POLICY);

   /* Save states. */
   struct pipe_shader_buffer saved_sb[3] = {};
   assert(num_buffers <= ARRAY_SIZE(saved_sb));
   si_get_shader_buffers(sctx, PIPE_SHADER_COMPUTE, 0, num_buffers, saved_sb);

   unsigned saved_writable_mask = 0;
   for (unsigned i = 0; i < num_buffers; i++) {
      if (sctx->const_and_shader_buffers[PIPE_SHADER_COMPUTE].writable_mask &
          (1u << si_get_shaderbuf_slot(i)))
         saved_writable_mask |= 1 << i;
   }

   /* Bind buffers and launch compute. */
   si_set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, num_buffers, buffers,
                         writeable_bitmask,
                         true /* don't update bind_history to prevent unnecessary syncs later */);
   si_launch_grid_internal(sctx, info, shader, flags);

   /* Do cache flushing at the end. */
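   /* If the consumer bypasses L2, write back L2 after the dispatch; otherwise just mark
    * the written buffers as having dirty data in L2 so later users know it must be flushed.
    */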
   if (get_cache_policy(sctx, coher, 0) == L2_BYPASS) {
      if (flags & SI_OP_SYNC_AFTER)
         sctx->flags |= SI_CONTEXT_WB_L2;
   } else {
      while (writeable_bitmask)
         si_resource(buffers[u_bit_scan(&writeable_bitmask)].buffer)->TC_L2_dirty = true;
   }

   /* Restore states. */
   sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, num_buffers, saved_sb,
                              saved_writable_mask);
   for (int i = 0; i < num_buffers; i++)
      pipe_resource_reference(&saved_sb[i].buffer, NULL);
}

/**
 * Clear a buffer using read-modify-write with a 32-bit write bitmask.
 * The clear value has 32 bits.
 */
void si_compute_clear_buffer_rmw(struct si_context *sctx, struct pipe_resource *dst,
                                 unsigned dst_offset, unsigned size,
                                 uint32_t clear_value, uint32_t writebitmask,
                                 unsigned flags, enum si_coherency coher)
{
   assert(dst_offset % 4 == 0);
   assert(size % 4 == 0);

   assert(dst->target != PIPE_BUFFER || dst_offset + size <= dst->width0);

   /* Use buffer_load_dwordx4 and buffer_store_dwordx4 per thread. */
   unsigned dwords_per_instruction = 4;
   unsigned block_size = 64; /* it's always 64x1x1 */
   unsigned dwords_per_wave = dwords_per_instruction * block_size;

   unsigned num_dwords = size / 4;
   unsigned num_instructions = DIV_ROUND_UP(num_dwords, dwords_per_instruction);

   struct pipe_grid_info info = {};
   info.block[0] = MIN2(block_size, num_instructions);
   info.block[1] = 1;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(num_dwords, dwords_per_wave);
   info.grid[1] = 1;
   info.grid[2] = 1;

   struct pipe_shader_buffer sb = {};
   sb.buffer = dst;
   sb.buffer_offset = dst_offset;
   sb.buffer_size = size;

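   /* Pass the pre-masked clear value and the inverted write mask so the shader can
    * compute dst = (dst & ~writebitmask) | (clear_value & writebitmask) per dword.
    */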
   sctx->cs_user_data[0] = clear_value & writebitmask;
   sctx->cs_user_data[1] = ~writebitmask;

   if (!sctx->cs_clear_buffer_rmw)
      sctx->cs_clear_buffer_rmw = si_create_clear_buffer_rmw_cs(sctx);

   si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_clear_buffer_rmw, flags, coher,
                                 1, &sb, 0x1);
}

static void si_compute_clear_12bytes_buffer(struct si_context *sctx, struct pipe_resource *dst,
                                            unsigned dst_offset, unsigned size,
                                            const uint32_t *clear_value, unsigned flags,
                                            enum si_coherency coher)
{
   struct pipe_context *ctx = &sctx->b;

   assert(dst_offset % 4 == 0);
   assert(size % 4 == 0);
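   /* Each thread clears one 12-byte (3-dword) element. */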
   unsigned size_12 = DIV_ROUND_UP(size, 12);

   struct pipe_shader_buffer sb = {0};
   sb.buffer = dst;
   sb.buffer_offset = dst_offset;
   sb.buffer_size = size;

   memcpy(sctx->cs_user_data, clear_value, 12);

   struct pipe_grid_info info = {0};

   if (!sctx->cs_clear_12bytes_buffer)
      sctx->cs_clear_12bytes_buffer = si_clear_12bytes_buffer_shader(ctx);

   info.block[0] = 64;
   info.last_block[0] = size_12 % 64;
   info.block[1] = 1;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(size_12, 64);
   info.grid[1] = 1;
   info.grid[2] = 1;

   si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_clear_12bytes_buffer, flags, coher,
                                 1, &sb, 0x1);
}

static void si_compute_do_clear_or_copy(struct si_context *sctx, struct pipe_resource *dst,
                                        unsigned dst_offset, struct pipe_resource *src,
                                        unsigned src_offset, unsigned size,
                                        const uint32_t *clear_value, unsigned clear_value_size,
                                        unsigned flags, enum si_coherency coher)
{
   assert(src_offset % 4 == 0);
   assert(dst_offset % 4 == 0);
   assert(size % 4 == 0);

   assert(dst->target != PIPE_BUFFER || dst_offset + size <= dst->width0);
   assert(!src || src_offset + size <= src->width0);

   /* The memory accesses are coalesced, meaning that the 1st instruction writes
    * the 1st contiguous block of data for the whole wave, the 2nd instruction
    * writes the 2nd contiguous block of data, etc.
    */
   unsigned dwords_per_thread =
      src ? SI_COMPUTE_COPY_DW_PER_THREAD : SI_COMPUTE_CLEAR_DW_PER_THREAD;
   unsigned instructions_per_thread = MAX2(1, dwords_per_thread / 4);
   unsigned dwords_per_instruction = dwords_per_thread / instructions_per_thread;
   /* The shader declares the block size like this: */
   unsigned block_size = si_determine_wave_size(sctx->screen, NULL);
   unsigned dwords_per_wave = dwords_per_thread * block_size;

   unsigned num_dwords = size / 4;
   unsigned num_instructions = DIV_ROUND_UP(num_dwords, dwords_per_instruction);
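   /* For example, assuming 4 dwords per thread and wave64, each wave processes
    * 4 * 64 = 256 dwords (1 KiB), so a 64 KiB clear launches 64 workgroups.
    */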

   struct pipe_grid_info info = {};
   info.block[0] = MIN2(block_size, num_instructions);
   info.block[1] = 1;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(num_dwords, dwords_per_wave);
   info.grid[1] = 1;
   info.grid[2] = 1;

   struct pipe_shader_buffer sb[2] = {};
   sb[0].buffer = dst;
   sb[0].buffer_offset = dst_offset;
   sb[0].buffer_size = size;

   bool shader_dst_stream_policy = SI_COMPUTE_DST_CACHE_POLICY != L2_LRU;

   if (src) {
      sb[1].buffer = src;
      sb[1].buffer_offset = src_offset;
      sb[1].buffer_size = size;

      if (!sctx->cs_copy_buffer) {
         sctx->cs_copy_buffer = si_create_dma_compute_shader(
            &sctx->b, SI_COMPUTE_COPY_DW_PER_THREAD, shader_dst_stream_policy, true);
      }

      si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_copy_buffer, flags, coher,
                                    2, sb, 0x1);
   } else {
      assert(clear_value_size >= 4 && clear_value_size <= 16 &&
             util_is_power_of_two_or_zero(clear_value_size));

      for (unsigned i = 0; i < 4; i++)
         sctx->cs_user_data[i] = clear_value[i % (clear_value_size / 4)];

      if (!sctx->cs_clear_buffer) {
         sctx->cs_clear_buffer = si_create_dma_compute_shader(
            &sctx->b, SI_COMPUTE_CLEAR_DW_PER_THREAD, shader_dst_stream_policy, false);
      }

      si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_clear_buffer, flags, coher,
                                    1, sb, 0x1);
   }
}

void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
                     uint64_t offset, uint64_t size, uint32_t *clear_value,
                     uint32_t clear_value_size, unsigned flags,
                     enum si_coherency coher, enum si_clear_method method)
{
   if (!size)
      return;

   si_improve_sync_flags(sctx, dst, NULL, &flags);

   ASSERTED unsigned clear_alignment = MIN2(clear_value_size, 4);

   assert(clear_value_size != 3 && clear_value_size != 6); /* 12 is allowed. */
   assert(offset % clear_alignment == 0);
   assert(size % clear_alignment == 0);
   assert(size < (UINT_MAX & ~0xf)); /* TODO: test 64-bit sizes in all codepaths */

   uint32_t clamped;
   if (util_lower_clearsize_to_dword(clear_value, (int*)&clear_value_size, &clamped))
      clear_value = &clamped;

   if (clear_value_size == 12) {
      si_compute_clear_12bytes_buffer(sctx, dst, offset, size, clear_value, flags, coher);
      return;
   }

   uint64_t aligned_size = size & ~3ull;
   if (aligned_size >= 4) {
      uint64_t compute_min_size;

      if (sctx->gfx_level <= GFX8) {
         /* CP DMA clears are terribly slow with GTT on GFX6-8, which can always
          * happen due to BO evictions.
          */
         compute_min_size = 0;
      } else {
         /* Use a small enough size because CP DMA is slower than compute with bigger sizes. */
         compute_min_size = 4 * 1024;
      }

      /* TODO: use compute for unaligned big sizes */
      if (method == SI_AUTO_SELECT_CLEAR_METHOD && (
           clear_value_size > 4 ||
           (clear_value_size == 4 && offset % 4 == 0 && size > compute_min_size))) {
         method = SI_COMPUTE_CLEAR_METHOD;
      }
      if (method == SI_COMPUTE_CLEAR_METHOD) {
         si_compute_do_clear_or_copy(sctx, dst, offset, NULL, 0, aligned_size, clear_value,
                                     clear_value_size, flags, coher);
      } else {
         assert(clear_value_size == 4);
         si_cp_dma_clear_buffer(sctx, &sctx->gfx_cs, dst, offset, aligned_size, *clear_value,
                                flags, coher, get_cache_policy(sctx, coher, size));
      }

      offset += aligned_size;
      size -= aligned_size;
   }

   /* Handle non-dword alignment. */
   if (size) {
      assert(dst);
      assert(dst->target == PIPE_BUFFER);
      assert(size < 4);

      sctx->b.buffer_subdata(&sctx->b, dst,
                             PIPE_MAP_WRITE |
                             /* TC forbids drivers from invalidating buffers and inferring
                              * unsynchronized mappings, so suppress those optimizations. */
                             (sctx->tc ? TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED |
                                         TC_TRANSFER_MAP_NO_INVALIDATE : 0),
                             offset, size, clear_value);
   }
}

void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst, uint64_t offset,
                            uint64_t size, unsigned value, unsigned flags)
{
   struct si_context *ctx = si_get_aux_context(sscreen);
   si_clear_buffer(ctx, dst, offset, size, &value, 4, flags,
                   SI_COHERENCY_SHADER, SI_AUTO_SELECT_CLEAR_METHOD);
   si_put_aux_context_flush(sscreen);
}

static void si_pipe_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
                                 unsigned offset, unsigned size, const void *clear_value,
                                 int clear_value_size)
{
   si_clear_buffer((struct si_context *)ctx, dst, offset, size, (uint32_t *)clear_value,
                   clear_value_size, SI_OP_SYNC_BEFORE_AFTER, SI_COHERENCY_SHADER,
                   SI_AUTO_SELECT_CLEAR_METHOD);
}

void si_copy_buffer(struct si_context *sctx, struct pipe_resource *dst, struct pipe_resource *src,
                    uint64_t dst_offset, uint64_t src_offset, unsigned size, unsigned flags)
{
   if (!size)
      return;

   enum si_coherency coher = SI_COHERENCY_SHADER;
   enum si_cache_policy cache_policy = get_cache_policy(sctx, coher, size);
   uint64_t compute_min_size = 8 * 1024;

   si_improve_sync_flags(sctx, dst, src, &flags);

   /* Only use compute for VRAM copies on dGPUs. */
   /* TODO: use compute for unaligned big sizes */
   if (sctx->screen->info.has_dedicated_vram && si_resource(dst)->domains & RADEON_DOMAIN_VRAM &&
       si_resource(src)->domains & RADEON_DOMAIN_VRAM && size > compute_min_size &&
       dst_offset % 4 == 0 && src_offset % 4 == 0 && size % 4 == 0) {
      si_compute_do_clear_or_copy(sctx, dst, dst_offset, src, src_offset, size, NULL, 0,
                                  flags, coher);
   } else {
      si_cp_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset, size,
                            flags, coher, cache_policy);
   }
}

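/* Set the workgroup size and derive the grid size and the partial last block from the
 * total work size. For example, work_x = 100 with block_x = 64 gives grid[0] = 2 and
 * last_block[0] = 36.
 */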
static void
set_work_size(struct pipe_grid_info *info, unsigned block_x, unsigned block_y, unsigned block_z,
              unsigned work_x, unsigned work_y, unsigned work_z)
{
   info->block[0] = block_x;
   info->block[1] = block_y;
   info->block[2] = block_z;

   unsigned work[3] = {work_x, work_y, work_z};
   for (int i = 0; i < 3; ++i) {
      info->last_block[i] = work[i] % info->block[i];
      info->grid[i] = DIV_ROUND_UP(work[i], info->block[i]);
   }
}

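/* Bind the given images, decompress and flush them as required, run the shader with
 * SI_OP_CS_IMAGE added to the flags, and restore the previously bound compute images.
 */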
static void si_launch_grid_internal_images(struct si_context *sctx,
                                           struct pipe_image_view *images,
                                           unsigned num_images,
                                           const struct pipe_grid_info *info,
                                           void *shader, unsigned flags)
{
   struct pipe_image_view saved_image[2] = {};
   assert(num_images <= ARRAY_SIZE(saved_image));

   for (unsigned i = 0; i < num_images; i++) {
      assert(sctx->b.screen->is_format_supported(sctx->b.screen, images[i].format,
                                                 images[i].resource->target,
                                                 images[i].resource->nr_samples,
                                                 images[i].resource->nr_storage_samples,
                                                 PIPE_BIND_SHADER_IMAGE));

      /* Always allow DCC stores on gfx10+. */
      if (sctx->gfx_level >= GFX10 &&
          images[i].access & PIPE_IMAGE_ACCESS_WRITE &&
          !(images[i].access & SI_IMAGE_ACCESS_DCC_OFF))
         images[i].access |= SI_IMAGE_ACCESS_ALLOW_DCC_STORE;

      /* Simplify the format according to what image stores support. */
      if (images[i].access & PIPE_IMAGE_ACCESS_WRITE) {
         images[i].format = util_format_linear(images[i].format); /* SRGB not supported */
         images[i].format = util_format_luminance_to_red(images[i].format);
         images[i].format = util_format_intensity_to_red(images[i].format);
         images[i].format = util_format_rgbx_to_rgba(images[i].format); /* prevent partial writes */
      }

      /* Save the image. */
      util_copy_image_view(&saved_image[i], &sctx->images[PIPE_SHADER_COMPUTE].views[i]);
   }

   /* This might invoke DCC decompression, so do it first. */
   sctx->b.set_shader_images(&sctx->b, PIPE_SHADER_COMPUTE, 0, num_images, 0, images);

   /* This should be done after set_shader_images. */
   for (unsigned i = 0; i < num_images; i++) {
      /* The driver doesn't decompress resources automatically here, so do it manually. */
      si_decompress_subresource(&sctx->b, images[i].resource, PIPE_MASK_RGBAZS,
                                images[i].u.tex.level, images[i].u.tex.first_layer,
                                images[i].u.tex.last_layer,
                                images[i].access & PIPE_IMAGE_ACCESS_WRITE);
   }

   /* This must be done before the compute shader. */
   for (unsigned i = 0; i < num_images; i++) {
      si_make_CB_shader_coherent(sctx, images[i].resource->nr_samples, true,
            ((struct si_texture*)images[i].resource)->surface.u.gfx9.color.dcc.pipe_aligned);
   }

   si_launch_grid_internal(sctx, info, shader, flags | SI_OP_CS_IMAGE);

   /* Restore images. */
   sctx->b.set_shader_images(&sctx->b, PIPE_SHADER_COMPUTE, 0, num_images, 0, saved_image);
   for (unsigned i = 0; i < num_images; i++)
      pipe_resource_reference(&saved_image[i].resource, NULL);
}

bool si_compute_copy_image(struct si_context *sctx, struct pipe_resource *dst, unsigned dst_level,
                           struct pipe_resource *src, unsigned src_level, unsigned dstx,
                           unsigned dsty, unsigned dstz, const struct pipe_box *src_box,
                           unsigned flags)
{
   struct si_texture *ssrc = (struct si_texture*)src;
   struct si_texture *sdst = (struct si_texture*)dst;

   si_use_compute_copy_for_float_formats(sctx, dst, dst_level);

   /* The compute copy is mandatory for compressed and subsampled formats because the gfx copy
    * doesn't support them. In all other cases, call si_can_use_compute_blit.
    *
    * The format is identical (we only need to check the src format) except compressed formats,
    * which can be paired with an equivalent integer format.
    */
   if (!util_format_is_compressed(src->format) &&
       !util_format_is_compressed(dst->format) &&
       !util_format_is_subsampled_422(src->format) &&
       (!si_can_use_compute_blit(sctx, dst->format, dst->nr_samples, true,
                                 vi_dcc_enabled(sdst, dst_level)) ||
        !si_can_use_compute_blit(sctx, src->format, src->nr_samples, false,
                                 vi_dcc_enabled(ssrc, src_level))))
      return false;

   enum pipe_format src_format = util_format_linear(src->format);
   enum pipe_format dst_format = util_format_linear(dst->format);
   bool is_linear = ssrc->surface.is_linear || sdst->surface.is_linear;

   assert(util_format_is_subsampled_422(src_format) == util_format_is_subsampled_422(dst_format));

   /* Interpret as integer values to avoid NaN issues */
   if (!vi_dcc_enabled(ssrc, src_level) &&
       !vi_dcc_enabled(sdst, dst_level) &&
       src_format == dst_format &&
       util_format_is_float(src_format) &&
       !util_format_is_compressed(src_format)) {
      switch(util_format_get_blocksizebits(src_format)) {
        case 16:
          src_format = dst_format = PIPE_FORMAT_R16_UINT;
          break;
        case 32:
          src_format = dst_format = PIPE_FORMAT_R32_UINT;
          break;
        case 64:
          src_format = dst_format = PIPE_FORMAT_R32G32_UINT;
          break;
        case 128:
          src_format = dst_format = PIPE_FORMAT_R32G32B32A32_UINT;
          break;
        default:
          assert(false);
      }
   }

   /* Interpret compressed formats as UINT. */
   struct pipe_box new_box;
   unsigned src_access = 0, dst_access = 0;

   /* Note that staging copies do compressed<->UINT, so one of the formats is already UINT. */
   if (util_format_is_compressed(src_format) || util_format_is_compressed(dst_format)) {
      if (util_format_is_compressed(src_format))
         src_access |= SI_IMAGE_ACCESS_BLOCK_FORMAT_AS_UINT;
      if (util_format_is_compressed(dst_format))
         dst_access |= SI_IMAGE_ACCESS_BLOCK_FORMAT_AS_UINT;

      dstx = util_format_get_nblocksx(dst_format, dstx);
      dsty = util_format_get_nblocksy(dst_format, dsty);

      new_box.x = util_format_get_nblocksx(src_format, src_box->x);
      new_box.y = util_format_get_nblocksy(src_format, src_box->y);
      new_box.z = src_box->z;
      new_box.width = util_format_get_nblocksx(src_format, src_box->width);
      new_box.height = util_format_get_nblocksy(src_format, src_box->height);
      new_box.depth = src_box->depth;
      src_box = &new_box;

      if (ssrc->surface.bpe == 8)
         src_format = dst_format = PIPE_FORMAT_R16G16B16A16_UINT; /* 64-bit block */
      else
         src_format = dst_format = PIPE_FORMAT_R32G32B32A32_UINT; /* 128-bit block */
   }

   if (util_format_is_subsampled_422(src_format)) {
      assert(src_format == dst_format);

      src_access |= SI_IMAGE_ACCESS_BLOCK_FORMAT_AS_UINT;
      dst_access |= SI_IMAGE_ACCESS_BLOCK_FORMAT_AS_UINT;

      dstx = util_format_get_nblocksx(src_format, dstx);

      new_box = *src_box;
      new_box.x = util_format_get_nblocksx(src_format, src_box->x);
      new_box.width = util_format_get_nblocksx(src_format, src_box->width);
      src_box = &new_box;

      src_format = dst_format = PIPE_FORMAT_R32_UINT;

      /* Interpreting 422 subsampled format (16 bpp) as 32 bpp
       * should force us to divide src_box->x, dstx and width by 2.
       * But given that ac_surface allocates this format as 32 bpp
       * and that surf_size is then modified to pack the values
       * we must keep the original values to get the correct results.
       */
   }

   /* SNORM blitting has precision issues. Use the SINT equivalent instead, which doesn't
    * force DCC decompression.
    */
   if (util_format_is_snorm(dst_format))
      src_format = dst_format = util_format_snorm_to_sint(dst_format);

   if (src_box->width == 0 || src_box->height == 0 || src_box->depth == 0)
      return true; /* success - nothing to do */

   struct pipe_image_view image[2] = {0};
   image[0].resource = src;
   image[0].shader_access = image[0].access = PIPE_IMAGE_ACCESS_READ | src_access;
   image[0].format = src_format;
   image[0].u.tex.level = src_level;
   image[0].u.tex.first_layer = 0;
   image[0].u.tex.last_layer = util_max_layer(src, src_level);
   image[1].resource = dst;
   image[1].shader_access = image[1].access = PIPE_IMAGE_ACCESS_WRITE | dst_access;
   image[1].format = dst_format;
   image[1].u.tex.level = dst_level;
   image[1].u.tex.first_layer = 0;
   image[1].u.tex.last_layer = util_max_layer(dst, dst_level);

   struct pipe_grid_info info = {0};

   bool dst_is_1d = dst->target == PIPE_TEXTURE_1D ||
                    dst->target == PIPE_TEXTURE_1D_ARRAY;
   bool src_is_1d = src->target == PIPE_TEXTURE_1D ||
                    src->target == PIPE_TEXTURE_1D_ARRAY;
   int block_x, block_y;
   int block_z = 1;

   /* Choose the block dimensions based on the copy area size. */
   if (src_box->height <= 4) {
      block_y = util_next_power_of_two(src_box->height);
      block_x = 64 / block_y;
   } else if (src_box->width <= 4) {
      block_x = util_next_power_of_two(src_box->width);
      block_y = 64 / block_x;
   } else if (is_linear) {
      block_x = 64;
      block_y = 1;
   } else {
      block_x = 8;
      block_y = 8;
   }

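   /* Pack the source coordinate into the low 16 bits and the destination coordinate
    * into the high 16 bits of each user-data dword (x, y, z).
    */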
   sctx->cs_user_data[0] = src_box->x | (dstx << 16);
   sctx->cs_user_data[1] = src_box->y | (dsty << 16);
   sctx->cs_user_data[2] = src_box->z | (dstz << 16);

   set_work_size(&info, block_x, block_y, block_z,
                 src_box->width, src_box->height, src_box->depth);

   void **copy_image_cs_ptr = &sctx->cs_copy_image[src_is_1d][dst_is_1d];
   if (!*copy_image_cs_ptr)
      *copy_image_cs_ptr = si_create_copy_image_cs(sctx, src_is_1d, dst_is_1d);

   assert(*copy_image_cs_ptr);

   si_launch_grid_internal_images(sctx, image, 2, &info, *copy_image_cs_ptr, flags);
   return true;
}

void si_retile_dcc(struct si_context *sctx, struct si_texture *tex)
{
   /* Set the DCC buffer. */
   assert(tex->surface.meta_offset && tex->surface.meta_offset <= UINT_MAX);
   assert(tex->surface.display_dcc_offset && tex->surface.display_dcc_offset <= UINT_MAX);
   assert(tex->surface.display_dcc_offset < tex->surface.meta_offset);
   assert(tex->buffer.bo_size <= UINT_MAX);

   struct pipe_shader_buffer sb = {};
   sb.buffer = &tex->buffer.b.b;
   sb.buffer_offset = tex->surface.display_dcc_offset;
   sb.buffer_size = tex->buffer.bo_size - sb.buffer_offset;

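   /* cs_user_data[0] is the offset of the non-displayable DCC relative to the displayable
    * DCC (the bound buffer starts at display_dcc_offset); [1] and [2] hold the pitch and
    * height of the non-displayable and displayable DCC surfaces, respectively.
    */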
   sctx->cs_user_data[0] = tex->surface.meta_offset - tex->surface.display_dcc_offset;
   sctx->cs_user_data[1] = (tex->surface.u.gfx9.color.dcc_pitch_max + 1) |
                           (tex->surface.u.gfx9.color.dcc_height << 16);
   sctx->cs_user_data[2] = (tex->surface.u.gfx9.color.display_dcc_pitch_max + 1) |
                           (tex->surface.u.gfx9.color.display_dcc_height << 16);

   /* We have only 1 variant per bpp for now, so expect 32 bpp. */
   assert(tex->surface.bpe == 4);

   void **shader = &sctx->cs_dcc_retile[tex->surface.u.gfx9.swizzle_mode];
   if (!*shader)
      *shader = si_create_dcc_retile_cs(sctx, &tex->surface);

   /* Dispatch compute. */
   unsigned width = DIV_ROUND_UP(tex->buffer.b.b.width0, tex->surface.u.gfx9.color.dcc_block_width);
   unsigned height = DIV_ROUND_UP(tex->buffer.b.b.height0, tex->surface.u.gfx9.color.dcc_block_height);

   struct pipe_grid_info info = {};
   info.block[0] = 8;
   info.block[1] = 8;
   info.block[2] = 1;
   info.last_block[0] = width % info.block[0];
   info.last_block[1] = height % info.block[1];
   info.grid[0] = DIV_ROUND_UP(width, info.block[0]);
   info.grid[1] = DIV_ROUND_UP(height, info.block[1]);
   info.grid[2] = 1;

   si_launch_grid_internal_ssbos(sctx, &info, *shader, SI_OP_SYNC_BEFORE,
                                 SI_COHERENCY_CB_META, 1, &sb, 0x1);

   /* Don't flush caches. L2 will be flushed by the kernel fence. */
}

void gfx9_clear_dcc_msaa(struct si_context *sctx, struct pipe_resource *res, uint32_t clear_value,
                         unsigned flags, enum si_coherency coher)
{
   struct si_texture *tex = (struct si_texture*)res;

   assert(sctx->gfx_level < GFX11);

   /* Set the DCC buffer. */
   assert(tex->surface.meta_offset && tex->surface.meta_offset <= UINT_MAX);
   assert(tex->buffer.bo_size <= UINT_MAX);

   struct pipe_shader_buffer sb = {};
   sb.buffer = &tex->buffer.b.b;
   sb.buffer_offset = tex->surface.meta_offset;
   sb.buffer_size = tex->buffer.bo_size - sb.buffer_offset;

   sctx->cs_user_data[0] = (tex->surface.u.gfx9.color.dcc_pitch_max + 1) |
                           (tex->surface.u.gfx9.color.dcc_height << 16);
   sctx->cs_user_data[1] = (clear_value & 0xffff) |
                           ((uint32_t)tex->surface.tile_swizzle << 16);

   /* These variables identify the shader variant. */
   unsigned swizzle_mode = tex->surface.u.gfx9.swizzle_mode;
   unsigned bpe_log2 = util_logbase2(tex->surface.bpe);
   unsigned log2_samples = util_logbase2(tex->buffer.b.b.nr_samples);
   bool fragments8 = tex->buffer.b.b.nr_storage_samples == 8;
   bool is_array = tex->buffer.b.b.array_size > 1;
   void **shader = &sctx->cs_clear_dcc_msaa[swizzle_mode][bpe_log2][fragments8][log2_samples - 2][is_array];

   if (!*shader)
      *shader = gfx9_create_clear_dcc_msaa_cs(sctx, tex);

   /* Dispatch compute. */
   unsigned width = DIV_ROUND_UP(tex->buffer.b.b.width0, tex->surface.u.gfx9.color.dcc_block_width);
   unsigned height = DIV_ROUND_UP(tex->buffer.b.b.height0, tex->surface.u.gfx9.color.dcc_block_height);
   unsigned depth = DIV_ROUND_UP(tex->buffer.b.b.array_size, tex->surface.u.gfx9.color.dcc_block_depth);

   struct pipe_grid_info info = {};
   info.block[0] = 8;
   info.block[1] = 8;
   info.block[2] = 1;
   info.last_block[0] = width % info.block[0];
   info.last_block[1] = height % info.block[1];
   info.grid[0] = DIV_ROUND_UP(width, info.block[0]);
   info.grid[1] = DIV_ROUND_UP(height, info.block[1]);
   info.grid[2] = depth;

   si_launch_grid_internal_ssbos(sctx, &info, *shader, flags, coher, 1, &sb, 0x1);
}

/* Expand FMASK to make it identity, so that image stores can ignore it. */
void si_compute_expand_fmask(struct pipe_context *ctx, struct pipe_resource *tex)
{
   struct si_context *sctx = (struct si_context *)ctx;
   bool is_array = tex->target == PIPE_TEXTURE_2D_ARRAY;
   unsigned log_fragments = util_logbase2(tex->nr_storage_samples);
   unsigned log_samples = util_logbase2(tex->nr_samples);
   assert(tex->nr_samples >= 2);

   assert(sctx->gfx_level < GFX11);

   /* EQAA FMASK expansion is unimplemented. */
   if (tex->nr_samples != tex->nr_storage_samples)
      return;

   si_make_CB_shader_coherent(sctx, tex->nr_samples, true,
                              ((struct si_texture*)tex)->surface.u.gfx9.color.dcc.pipe_aligned);

   /* Save states. */
   struct pipe_image_view saved_image = {0};
   util_copy_image_view(&saved_image, &sctx->images[PIPE_SHADER_COMPUTE].views[0]);

   /* Bind the image. */
   struct pipe_image_view image = {0};
   image.resource = tex;
   /* Don't set WRITE so as not to trigger FMASK expansion, causing
    * an infinite loop. */
   image.shader_access = image.access = PIPE_IMAGE_ACCESS_READ;
   image.format = util_format_linear(tex->format);
   if (is_array)
      image.u.tex.last_layer = tex->array_size - 1;

   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, 0, &image);

   /* Bind the shader. */
   void **shader = &sctx->cs_fmask_expand[log_samples - 1][is_array];
   if (!*shader)
      *shader = si_create_fmask_expand_cs(ctx, tex->nr_samples, is_array);

   /* Dispatch compute. */
   struct pipe_grid_info info = {0};
   info.block[0] = 8;
   info.last_block[0] = tex->width0 % 8;
   info.block[1] = 8;
   info.last_block[1] = tex->height0 % 8;
   info.block[2] = 1;
   info.grid[0] = DIV_ROUND_UP(tex->width0, 8);
   info.grid[1] = DIV_ROUND_UP(tex->height0, 8);
   info.grid[2] = is_array ? tex->array_size : 1;

   si_launch_grid_internal(sctx, &info, *shader, SI_OP_SYNC_BEFORE_AFTER);

   /* Restore previous states. */
   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, 0, &saved_image);
   pipe_resource_reference(&saved_image.resource, NULL);

   /* Array of fully expanded FMASK values, arranged by [log2(fragments)][log2(samples)-1]. */
#define INVALID 0 /* never used */
   static const uint64_t fmask_expand_values[][4] = {
      /* samples */
      /* 2 (8 bpp) 4 (8 bpp)   8 (8-32bpp) 16 (16-64bpp)      fragments */
      {0x02020202, 0x0E0E0E0E, 0xFEFEFEFE, 0xFFFEFFFE},      /* 1 */
      {0x02020202, 0xA4A4A4A4, 0xAAA4AAA4, 0xAAAAAAA4},      /* 2 */
      {INVALID, 0xE4E4E4E4, 0x44443210, 0x4444444444443210}, /* 4 */
      {INVALID, INVALID, 0x76543210, 0x8888888876543210},    /* 8 */
   };

   /* Clear FMASK to identity. */
   struct si_texture *stex = (struct si_texture *)tex;
   si_clear_buffer(sctx, tex, stex->surface.fmask_offset, stex->surface.fmask_size,
                   (uint32_t *)&fmask_expand_values[log_fragments][log_samples - 1],
                   log_fragments >= 2 && log_samples == 4 ? 8 : 4, SI_OP_SYNC_AFTER,
                   SI_COHERENCY_SHADER, SI_AUTO_SELECT_CLEAR_METHOD);
}

void si_init_compute_blit_functions(struct si_context *sctx)
{
   sctx->b.clear_buffer = si_pipe_clear_buffer;
}

/* Clear a region of a color surface to a constant value. */
void si_compute_clear_render_target(struct pipe_context *ctx, struct pipe_surface *dstsurf,
                                    const union pipe_color_union *color, unsigned dstx,
                                    unsigned dsty, unsigned width, unsigned height,
                                    bool render_condition_enabled)
{
   struct si_context *sctx = (struct si_context *)ctx;
   unsigned num_layers = dstsurf->u.tex.last_layer - dstsurf->u.tex.first_layer + 1;
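   /* Constant buffer layout: the first four dwords are dstx, dsty, first_layer, and
    * padding, followed by the clear color (converted to sRGB below if needed).
    */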
   unsigned data[4 + sizeof(color->ui)] = {dstx, dsty, dstsurf->u.tex.first_layer, 0};

   if (width == 0 || height == 0)
      return;

   if (util_format_is_srgb(dstsurf->format)) {
      union pipe_color_union color_srgb;
      for (int i = 0; i < 3; i++)
         color_srgb.f[i] = util_format_linear_to_srgb_float(color->f[i]);
      color_srgb.f[3] = color->f[3];
      memcpy(data + 4, color_srgb.ui, sizeof(color->ui));
   } else {
      memcpy(data + 4, color->ui, sizeof(color->ui));
   }

   struct pipe_constant_buffer saved_cb = {};
   si_get_pipe_constant_buffer(sctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);

   struct pipe_constant_buffer cb = {};
   cb.buffer_size = sizeof(data);
   cb.user_buffer = data;
   ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, false, &cb);

   struct pipe_image_view image = {0};
   image.resource = dstsurf->texture;
   image.shader_access = image.access = PIPE_IMAGE_ACCESS_WRITE;
   image.format = util_format_linear(dstsurf->format);
   image.u.tex.level = dstsurf->u.tex.level;
   image.u.tex.first_layer = 0; /* 3D images ignore first_layer (BASE_ARRAY) */
   image.u.tex.last_layer = dstsurf->u.tex.last_layer;

   struct pipe_grid_info info = {0};
   void *shader;

   if (dstsurf->texture->target != PIPE_TEXTURE_1D_ARRAY) {
      if (!sctx->cs_clear_render_target)
         sctx->cs_clear_render_target = si_clear_render_target_shader(ctx);
      shader = sctx->cs_clear_render_target;

      info.block[0] = 8;
      info.last_block[0] = width % 8;
      info.block[1] = 8;
      info.last_block[1] = height % 8;
      info.block[2] = 1;
      info.grid[0] = DIV_ROUND_UP(width, 8);
      info.grid[1] = DIV_ROUND_UP(height, 8);
      info.grid[2] = num_layers;
   } else {
      if (!sctx->cs_clear_render_target_1d_array)
         sctx->cs_clear_render_target_1d_array = si_clear_render_target_shader_1d_array(ctx);
      shader = sctx->cs_clear_render_target_1d_array;

      info.block[0] = 64;
      info.last_block[0] = width % 64;
      info.block[1] = 1;
      info.block[2] = 1;
      info.grid[0] = DIV_ROUND_UP(width, 64);
      info.grid[1] = num_layers;
      info.grid[2] = 1;
   }

   si_launch_grid_internal_images(sctx, &image, 1, &info, shader,
                                  SI_OP_SYNC_BEFORE_AFTER |
                                  (render_condition_enabled ? SI_OP_CS_RENDER_COND_ENABLE : 0));

   ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, true, &saved_cb);
}