/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include "si_pipe.h"
#include "sid.h"
#include "si_build_pm4.h"

/* Set this if you want the ME to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define CP_DMA_SYNC        (1 << 0)

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define CP_DMA_RAW_WAIT    (1 << 1)
#define CP_DMA_CLEAR       (1 << 2)

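/* Illustrative summary (not from the original header comments, but consistent
 * with how si_cp_dma_prepare() below assigns these bits): when an operation is
 * split into several CP DMA packets, only the first packet of a copy gets
 * CP_DMA_RAW_WAIT and only the last packet gets CP_DMA_SYNC, e.g.:
 *
 *    packet 0:        CP_DMA_RAW_WAIT
 *    packets 1..n-2:  0
 *    packet n-1:      CP_DMA_SYNC
 */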
/* The max number of bytes that can be copied per packet. */
static inline unsigned cp_dma_max_byte_count(struct si_context *sctx)
{
   unsigned max =
      sctx->gfx_level >= GFX11 ? 32767 :
      sctx->gfx_level >= GFX9 ? S_415_BYTE_COUNT_GFX9(~0u) : S_415_BYTE_COUNT_GFX6(~0u);

   /* make it aligned for optimal performance */
   return max & ~(SI_CPDMA_ALIGNMENT - 1);
}
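/* Worked example (an assumption for illustration: SI_CPDMA_ALIGNMENT is
 * defined elsewhere, e.g. as 32): on GFX11 the raw limit is 32767 bytes, and
 * masking with ~(32 - 1) rounds it down to 32736, so every full-size chunk
 * emitted by the copy and clear loops below stays aligned. */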

/* Whether CP DMA should skip holes in a sparse BO (GFX9-only workaround). */
static inline bool cp_dma_sparse_wa(struct si_context *sctx, struct si_resource *sdst)
{
   return sctx->gfx_level == GFX9 && sdst->flags & RADEON_FLAG_SPARSE;
}

/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
 * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set, src_va is a 32-bit
 * clear value.
 */
static void si_emit_cp_dma(struct si_context *sctx, struct radeon_cmdbuf *cs, uint64_t dst_va,
                           uint64_t src_va, unsigned size, unsigned flags)
{
   uint32_t header = 0, command = 0;

   assert(sctx->screen->info.has_cp_dma);
   assert(size <= cp_dma_max_byte_count(sctx));

   if (sctx->gfx_level >= GFX9)
      command |= S_415_BYTE_COUNT_GFX9(size);
   else
      command |= S_415_BYTE_COUNT_GFX6(size);

   /* Sync flags. */
   if (flags & CP_DMA_SYNC)
      header |= S_411_CP_SYNC(1);

   if (flags & CP_DMA_RAW_WAIT)
      command |= S_415_RAW_WAIT(1);

   /* Src and dst flags. */
   if (sctx->screen->info.cp_dma_use_L2)
      header |= S_501_DST_SEL(V_501_DST_ADDR_TC_L2);

   if (flags & CP_DMA_CLEAR) {
      header |= S_411_SRC_SEL(V_411_DATA);
   } else if (sctx->screen->info.cp_dma_use_L2) {
      header |= S_501_SRC_SEL(V_501_SRC_ADDR_TC_L2);
   }

   radeon_begin(cs);

   if (sctx->gfx_level >= GFX7) {
      radeon_emit(PKT3(PKT3_DMA_DATA, 5, 0));
      radeon_emit(header);
      radeon_emit(src_va);       /* SRC_ADDR_LO [31:0] */
      radeon_emit(src_va >> 32); /* SRC_ADDR_HI [31:0] */
      radeon_emit(dst_va);       /* DST_ADDR_LO [31:0] */
      radeon_emit(dst_va >> 32); /* DST_ADDR_HI [31:0] */
      radeon_emit(command);
   } else {
      header |= S_411_SRC_ADDR_HI(src_va >> 32);

      radeon_emit(PKT3(PKT3_CP_DMA, 4, 0));
      radeon_emit(src_va);                  /* SRC_ADDR_LO [31:0] */
      radeon_emit(header);                  /* SRC_ADDR_HI [15:0] + flags. */
      radeon_emit(dst_va);                  /* DST_ADDR_LO [31:0] */
      radeon_emit((dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
      radeon_emit(command);
   }
   radeon_end();
}

void si_cp_dma_wait_for_idle(struct si_context *sctx, struct radeon_cmdbuf *cs)
{
   /* Issue a dummy DMA that copies zero bytes.
    *
    * The DMA engine will see that there's no work to do and skip this
    * DMA request; however, the CP will see the sync flag and still wait
    * for all DMAs to complete.
    */
   si_emit_cp_dma(sctx, cs, 0, 0, 0, CP_DMA_SYNC);
}

static void si_cp_dma_prepare(struct si_context *sctx, struct pipe_resource *dst,
                              struct pipe_resource *src, unsigned byte_count,
                              uint64_t remaining_size, bool *is_first, unsigned *packet_flags)
{
   si_need_gfx_cs_space(sctx, 0);

   /* This must be done after need_cs_space. */
   radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, si_resource(dst),
                             RADEON_USAGE_WRITE | RADEON_PRIO_CP_DMA);
   if (src)
      radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, si_resource(src),
                                RADEON_USAGE_READ | RADEON_PRIO_CP_DMA);

   /* Flush the caches for the first copy only.
    * Also wait for the previous CP DMA operations.
    */
   if (*is_first)
      si_emit_barrier_direct(sctx);

   if (*is_first && !(*packet_flags & CP_DMA_CLEAR))
      *packet_flags |= CP_DMA_RAW_WAIT;

   *is_first = false;

   /* Do the synchronization after the last dma, so that all data
    * is written to memory.
    */
   if (byte_count == remaining_size)
      *packet_flags |= CP_DMA_SYNC;
}
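/* Illustrative trace (a sketch, not from the original comments): if a copy is
 * split into two chunks, the first si_cp_dma_prepare() call sees *is_first ==
 * true, so it emits the barrier and sets CP_DMA_RAW_WAIT; the second call sees
 * byte_count == remaining_size and therefore sets CP_DMA_SYNC. For a clear
 * (CP_DMA_CLEAR already set by the caller), RAW_WAIT is never added because
 * the packet reads no memory source that could alias a previous destination. */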

void si_cp_dma_clear_buffer(struct si_context *sctx, struct radeon_cmdbuf *cs,
                            struct pipe_resource *dst, uint64_t offset, uint64_t size,
                            unsigned value)
{
   struct si_resource *sdst = si_resource(dst);
   uint64_t va = sdst->gpu_address + offset;
   bool is_first = true;

   assert(!sctx->screen->info.cp_sdma_ge_use_system_memory_scope);
   assert(size && size % 4 == 0);

   if (!sctx->screen->info.cp_dma_use_L2) {
      sctx->barrier_flags |= SI_BARRIER_INV_L2;
      si_mark_atom_dirty(sctx, &sctx->atoms.s.barrier);
   }

   /* Mark the buffer range of destination as valid (initialized),
    * so that transfer_map knows it should wait for the GPU when mapping
    * that range. */
   util_range_add(dst, &sdst->valid_buffer_range, offset, offset + size);

   while (size) {
      unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));
      unsigned dma_flags = CP_DMA_CLEAR;

      if (cp_dma_sparse_wa(sctx, sdst)) {
         unsigned skip_count =
            sctx->ws->buffer_find_next_committed_memory(sdst->buf,
                  va - sdst->gpu_address, &byte_count);
         va += skip_count;
         size -= skip_count;
      }

      if (!byte_count)
         continue;

      si_cp_dma_prepare(sctx, dst, NULL, byte_count, size, &is_first, &dma_flags);

      /* Emit the clear packet. */
      si_emit_cp_dma(sctx, cs, va, value, byte_count, dma_flags);

      size -= byte_count;
      va += byte_count;
   }

   sctx->num_cp_dma_calls++;
}
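/* Hypothetical usage sketch (not present in the original file; "buf" is an
 * assumed pipe_resource pointer): clearing the first 256 bytes of a buffer
 * to zero from the gfx IB could look like
 *
 *    si_cp_dma_clear_buffer(sctx, &sctx->gfx_cs, buf, 0, 256, 0);
 *
 * where the size must be a non-zero multiple of 4, per the assert above. */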

/**
 * Realign the CP DMA engine. This must be done after a copy with an unaligned
 * size.
 *
 * \param size  Remaining size to the CP DMA alignment.
 */
static void si_cp_dma_realign_engine(struct si_context *sctx, unsigned size, bool *is_first)
{
   uint64_t va;
   unsigned dma_flags = 0;
   unsigned scratch_size = SI_CPDMA_ALIGNMENT * 2;

   assert(size < SI_CPDMA_ALIGNMENT);

   /* Use the scratch buffer as the dummy buffer. The 3D engine should be
    * idle at this point.
    */
   if (!sctx->scratch_buffer || sctx->scratch_buffer->b.b.width0 < scratch_size) {
      si_resource_reference(&sctx->scratch_buffer, NULL);
      sctx->scratch_buffer = si_aligned_buffer_create(&sctx->screen->b,
                                                      PIPE_RESOURCE_FLAG_UNMAPPABLE | SI_RESOURCE_FLAG_DRIVER_INTERNAL |
                                                      SI_RESOURCE_FLAG_DISCARDABLE,
                                                      PIPE_USAGE_DEFAULT, scratch_size, 256);
      if (!sctx->scratch_buffer)
         return;

      si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state);
   }

   si_cp_dma_prepare(sctx, &sctx->scratch_buffer->b.b, &sctx->scratch_buffer->b.b, size, size,
                     is_first, &dma_flags);

   va = sctx->scratch_buffer->gpu_address;
   si_emit_cp_dma(sctx, &sctx->gfx_cs, va, va + SI_CPDMA_ALIGNMENT, size, dma_flags);
}

/**
 * Do memcpy between buffers using CP DMA.
 */
void si_cp_dma_copy_buffer(struct si_context *sctx, struct pipe_resource *dst,
                           struct pipe_resource *src, uint64_t dst_offset, uint64_t src_offset,
                           unsigned size)
{
   assert(size);
   assert(dst && src);

   if (!sctx->screen->info.cp_dma_use_L2) {
      sctx->barrier_flags |= SI_BARRIER_INV_L2;
      si_mark_atom_dirty(sctx, &sctx->atoms.s.barrier);
   }

   /* Mark the buffer range of destination as valid (initialized),
    * so that transfer_map knows it should wait for the GPU when mapping
    * that range.
    */
   util_range_add(dst, &si_resource(dst)->valid_buffer_range, dst_offset, dst_offset + size);

   dst_offset += si_resource(dst)->gpu_address;
   src_offset += si_resource(src)->gpu_address;

   unsigned skipped_size = 0;
   unsigned realign_size = 0;

   /* The workarounds aren't needed on Fiji and beyond. */
   if (sctx->family <= CHIP_CARRIZO || sctx->family == CHIP_STONEY) {
      /* If the size is not aligned, we must add a dummy copy at the end
       * just to align the internal counter. Otherwise, the DMA engine
       * would slow down by an order of magnitude for following copies.
       */
      if (size % SI_CPDMA_ALIGNMENT)
         realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);

      /* If the copy begins unaligned, we must start copying from the next
       * aligned block and the skipped part should be copied after everything
       * else has been copied. Only the src alignment matters, not dst.
       */
      if (src_offset % SI_CPDMA_ALIGNMENT) {
         skipped_size = SI_CPDMA_ALIGNMENT - (src_offset % SI_CPDMA_ALIGNMENT);
         /* The main part will be skipped if the size is too small. */
         skipped_size = MIN2(skipped_size, size);
         size -= skipped_size;
      }
   }
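
   /* Worked example (illustrative only, assuming SI_CPDMA_ALIGNMENT == 32):
    * with src_offset % 32 == 8 and size == 100, the code above picks
    * skipped_size = 32 - 8 = 24 (copied last), leaves size = 76 for the
    * aligned main loop, and realign_size = 32 - (100 % 32) = 28 for the
    * dummy copy that realigns the engine at the end. */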

   /* TMZ handling */
   if (unlikely(radeon_uses_secure_bos(sctx->ws))) {
      bool secure = si_resource(src)->flags & RADEON_FLAG_ENCRYPTED;
      assert(!secure || si_resource(dst)->flags & RADEON_FLAG_ENCRYPTED);
      if (secure != sctx->ws->cs_is_secure(&sctx->gfx_cs)) {
         si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW |
                               RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION, NULL);
      }
   }

   /* This is the main part doing the copying. Src is always aligned. */
   uint64_t main_dst_offset = dst_offset + skipped_size;
   uint64_t main_src_offset = src_offset + skipped_size;
   bool is_first = true;

   while (size) {
      unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));
      unsigned dma_flags = 0;

      if (cp_dma_sparse_wa(sctx, si_resource(dst))) {
         unsigned skip_count =
            sctx->ws->buffer_find_next_committed_memory(si_resource(dst)->buf,
                  main_dst_offset - si_resource(dst)->gpu_address, &byte_count);
         main_dst_offset += skip_count;
         main_src_offset += skip_count;
         size -= skip_count;
      }

      if (cp_dma_sparse_wa(sctx, si_resource(src))) {
         unsigned skip_count =
            sctx->ws->buffer_find_next_committed_memory(si_resource(src)->buf,
                  main_src_offset - si_resource(src)->gpu_address, &byte_count);
         main_dst_offset += skip_count;
         main_src_offset += skip_count;
         size -= skip_count;
      }

      if (!byte_count)
         continue;

      si_cp_dma_prepare(sctx, dst, src, byte_count, size + skipped_size + realign_size,
                        &is_first, &dma_flags);

      si_emit_cp_dma(sctx, &sctx->gfx_cs, main_dst_offset, main_src_offset, byte_count, dma_flags);

      size -= byte_count;
      main_src_offset += byte_count;
      main_dst_offset += byte_count;
   }

   /* Copy the part we skipped because src wasn't aligned. */
   if (skipped_size) {
      unsigned dma_flags = 0;

      si_cp_dma_prepare(sctx, dst, src, skipped_size, skipped_size + realign_size,
                        &is_first, &dma_flags);

      si_emit_cp_dma(sctx, &sctx->gfx_cs, dst_offset, src_offset, skipped_size, dma_flags);
   }

   /* Finally, realign the engine if the size wasn't aligned. */
   if (realign_size)
      si_cp_dma_realign_engine(sctx, realign_size, &is_first);

   sctx->num_cp_dma_calls++;
}
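/* Illustrative note (a summary, not from the original comments): the copy is
 * emitted as up to three pieces: the aligned main loop, then the unaligned
 * head that was skipped, then the dummy realignment copy. Because each
 * si_cp_dma_prepare() call passes the total bytes still outstanding
 * (size + skipped_size + realign_size), only the very last packet of the
 * whole sequence gets CP_DMA_SYNC. */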

void si_cp_write_data(struct si_context *sctx, struct si_resource *buf, unsigned offset,
                      unsigned size, unsigned dst_sel, unsigned engine, const void *data)
{
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;

   assert(offset % 4 == 0);
   assert(size % 4 == 0);

   if (sctx->gfx_level == GFX6 && dst_sel == V_370_MEM)
      dst_sel = V_370_MEM_GRBM;

   radeon_add_to_buffer_list(sctx, cs, buf, RADEON_USAGE_WRITE | RADEON_PRIO_CP_DMA);
   uint64_t va = buf->gpu_address + offset;

   radeon_begin(cs);
   radeon_emit(PKT3(PKT3_WRITE_DATA, 2 + size / 4, 0));
   radeon_emit(S_370_DST_SEL(dst_sel) | S_370_WR_CONFIRM(1) | S_370_ENGINE_SEL(engine));
   radeon_emit(va);
   radeon_emit(va >> 32);
   radeon_emit_array((const uint32_t *)data, size / 4);
   radeon_end();
}
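/* Hypothetical usage sketch (the destination and engine selector names are
 * assumptions; the exact V_370_ enums come from sid.h, not this file):
 * writing one dword into a buffer at a 4-byte-aligned offset could look like
 *
 *    uint32_t value = 0xdeadbeef;
 *    si_cp_write_data(sctx, buf, offset, 4, V_370_MEM, V_370_ME, &value);
 */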

void si_cp_copy_data(struct si_context *sctx, struct radeon_cmdbuf *cs, unsigned dst_sel,
                     struct si_resource *dst, unsigned dst_offset, unsigned src_sel,
                     struct si_resource *src, unsigned src_offset)
{
   /* cs can point to the compute IB, which has the buffer list in gfx_cs. */
   if (dst) {
      radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, dst, RADEON_USAGE_WRITE | RADEON_PRIO_CP_DMA);
   }
   if (src) {
      radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, src, RADEON_USAGE_READ | RADEON_PRIO_CP_DMA);
   }

   uint64_t dst_va = (dst ? dst->gpu_address : 0ull) + dst_offset;
   uint64_t src_va = (src ? src->gpu_address : 0ull) + src_offset;

   radeon_begin(cs);
   radeon_emit(PKT3(PKT3_COPY_DATA, 4, 0));
   radeon_emit(COPY_DATA_SRC_SEL(src_sel) | COPY_DATA_DST_SEL(dst_sel) | COPY_DATA_WR_CONFIRM);
   radeon_emit(src_va);
   radeon_emit(src_va >> 32);
   radeon_emit(dst_va);
   radeon_emit(dst_va >> 32);
   radeon_end();
}
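
/* Hypothetical usage sketch (the COPY_DATA_* selector names are assumptions
 * taken from sid.h, not defined in this file): copying one dword from one
 * buffer to another via the CP could look like
 *
 *    si_cp_copy_data(sctx, &sctx->gfx_cs, COPY_DATA_DST_MEM, dst, dst_offset,
 *                    COPY_DATA_SRC_MEM, src, src_offset);
 */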