/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_build_pm4.h"
#include "ac_debug.h"
#include "ac_shadowed_regs.h"
#include "util/u_memory.h"

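/* CP register shadowing support. When mid-command-buffer preemption is
 * enabled, the CP mirrors register writes into the shadowed_regs buffer,
 * and the preamble IB built here reloads that state from memory on a
 * context switch, so no register state is lost across preemption.
 */
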
static void si_build_load_reg(struct si_screen *sscreen, struct si_pm4_state *pm4,
                              enum ac_reg_range_type type,
                              struct si_resource *shadow_regs)
{
   uint64_t gpu_address = shadow_regs->gpu_address;
   unsigned packet, num_ranges, offset;
   const struct ac_reg_range *ranges;

   ac_get_reg_ranges(sscreen->info.gfx_level, sscreen->info.family,
                     type, &num_ranges, &ranges);

   switch (type) {
   case SI_REG_RANGE_UCONFIG:
      gpu_address += SI_SHADOWED_UCONFIG_REG_OFFSET;
      offset = CIK_UCONFIG_REG_OFFSET;
      packet = PKT3_LOAD_UCONFIG_REG;
      break;
   case SI_REG_RANGE_CONTEXT:
      gpu_address += SI_SHADOWED_CONTEXT_REG_OFFSET;
      offset = SI_CONTEXT_REG_OFFSET;
      packet = PKT3_LOAD_CONTEXT_REG;
      break;
   default:
      gpu_address += SI_SHADOWED_SH_REG_OFFSET;
      offset = SI_SH_REG_OFFSET;
      packet = PKT3_LOAD_SH_REG;
      break;
   }

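   /* LOAD_*_REG packet layout: header, shadow buffer VA (low, high), then one
    * pair of dwords per range: the dword offset of the range's first register
    * relative to the block base, and the range size in dwords. Illustrative
    * stream for two context ranges:
    *    PKT3(PKT3_LOAD_CONTEXT_REG, 5, 0)
    *    va_lo, va_hi
    *    (r0.offset - SI_CONTEXT_REG_OFFSET) / 4, r0.size / 4
    *    (r1.offset - SI_CONTEXT_REG_OFFSET) / 4, r1.size / 4
    */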
   si_pm4_cmd_add(pm4, PKT3(packet, 1 + num_ranges * 2, 0));
   si_pm4_cmd_add(pm4, gpu_address);
   si_pm4_cmd_add(pm4, gpu_address >> 32);
   for (unsigned i = 0; i < num_ranges; i++) {
      si_pm4_cmd_add(pm4, (ranges[i].offset - offset) / 4);
      si_pm4_cmd_add(pm4, ranges[i].size / 4);
   }
}

static struct si_pm4_state *
si_create_shadowing_ib_preamble(struct si_context *sctx)
{
   struct si_shadow_preamble {
      struct si_pm4_state pm4;
      uint32_t more_pm4[150]; /* Add more space because the command buffer is large. */
   };
   struct si_pm4_state *pm4 = (struct si_pm4_state *)CALLOC_STRUCT(si_shadow_preamble);

   /* Add all the space that we allocated. max_dw counts dwords, so divide the byte size by 4. */
   pm4->max_dw = (sizeof(struct si_shadow_preamble) - offsetof(struct si_shadow_preamble, pm4.pm4)) / 4;

   if (sctx->screen->dpbb_allowed) {
      si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
      si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
   }

   /* Wait for idle, because we'll update VGT ring pointers. */
   si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
   si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));

   /* VGT_FLUSH is required even if VGT is idle. It resets VGT pointers. */
   si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
   si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));

   if (sctx->gfx_level >= GFX11) {
      /* We must wait for idle using an EOP event before changing the attribute ring registers.
       * Use the bottom-of-pipe EOP event, but increment the PWS counter instead of writing memory.
       */
      si_pm4_cmd_add(pm4, PKT3(PKT3_RELEASE_MEM, 6, 0));
      si_pm4_cmd_add(pm4, S_490_EVENT_TYPE(V_028A90_BOTTOM_OF_PIPE_TS) |
                          S_490_EVENT_INDEX(5) |
                          S_490_PWS_ENABLE(1));
      si_pm4_cmd_add(pm4, 0); /* DST_SEL, INT_SEL, DATA_SEL */
      si_pm4_cmd_add(pm4, 0); /* ADDRESS_LO */
      si_pm4_cmd_add(pm4, 0); /* ADDRESS_HI */
      si_pm4_cmd_add(pm4, 0); /* DATA_LO */
      si_pm4_cmd_add(pm4, 0); /* DATA_HI */
      si_pm4_cmd_add(pm4, 0); /* INT_CTXID */

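      /* Invalidate (and write back where applicable) all GPU caches: GL2,
       * GL2 metadata (GLM), GL1, vector L0 (GLV), scalar (GLK) and
       * instruction (GLI) caches.
       */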
      unsigned gcr_cntl = S_586_GL2_INV(1) | S_586_GL2_WB(1) |
                          S_586_GLM_INV(1) | S_586_GLM_WB(1) |
                          S_586_GL1_INV(1) | S_586_GLV_INV(1) |
                          S_586_GLK_INV(1) | S_586_GLI_INV(V_586_GLI_ALL);

      /* Wait for the PWS counter. */
      si_pm4_cmd_add(pm4, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
      si_pm4_cmd_add(pm4, S_580_PWS_STAGE_SEL(V_580_CP_PFP) |
                          S_580_PWS_COUNTER_SEL(V_580_TS_SELECT) |
                          S_580_PWS_ENA2(1) |
                          S_580_PWS_COUNT(0));
      si_pm4_cmd_add(pm4, 0xffffffff); /* GCR_SIZE */
      si_pm4_cmd_add(pm4, 0x01ffffff); /* GCR_SIZE_HI */
      si_pm4_cmd_add(pm4, 0); /* GCR_BASE_LO */
      si_pm4_cmd_add(pm4, 0); /* GCR_BASE_HI */
      si_pm4_cmd_add(pm4, S_585_PWS_ENA(1));
      si_pm4_cmd_add(pm4, gcr_cntl); /* GCR_CNTL */
   } else if (sctx->gfx_level >= GFX10) {
      unsigned gcr_cntl = S_586_GL2_INV(1) | S_586_GL2_WB(1) |
                          S_586_GLM_INV(1) | S_586_GLM_WB(1) |
                          S_586_GL1_INV(1) | S_586_GLV_INV(1) |
                          S_586_GLK_INV(1) | S_586_GLI_INV(V_586_GLI_ALL);

      si_pm4_cmd_add(pm4, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
      si_pm4_cmd_add(pm4, 0);           /* CP_COHER_CNTL */
      si_pm4_cmd_add(pm4, 0xffffffff);  /* CP_COHER_SIZE */
      si_pm4_cmd_add(pm4, 0xffffff);    /* CP_COHER_SIZE_HI */
      si_pm4_cmd_add(pm4, 0);           /* CP_COHER_BASE */
      si_pm4_cmd_add(pm4, 0);           /* CP_COHER_BASE_HI */
      si_pm4_cmd_add(pm4, 0x0000000A);  /* POLL_INTERVAL */
      si_pm4_cmd_add(pm4, gcr_cntl);    /* GCR_CNTL */

      si_pm4_cmd_add(pm4, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
      si_pm4_cmd_add(pm4, 0);
   } else if (sctx->gfx_level == GFX9) {
      unsigned cp_coher_cntl = S_0301F0_SH_ICACHE_ACTION_ENA(1) |
                               S_0301F0_SH_KCACHE_ACTION_ENA(1) |
                               S_0301F0_TC_ACTION_ENA(1) |
                               S_0301F0_TCL1_ACTION_ENA(1) |
                               S_0301F0_TC_WB_ACTION_ENA(1);

      si_pm4_cmd_add(pm4, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
      si_pm4_cmd_add(pm4, cp_coher_cntl); /* CP_COHER_CNTL */
      si_pm4_cmd_add(pm4, 0xffffffff);    /* CP_COHER_SIZE */
      si_pm4_cmd_add(pm4, 0xffffff);      /* CP_COHER_SIZE_HI */
      si_pm4_cmd_add(pm4, 0);             /* CP_COHER_BASE */
      si_pm4_cmd_add(pm4, 0);             /* CP_COHER_BASE_HI */
      si_pm4_cmd_add(pm4, 0x0000000A);    /* POLL_INTERVAL */

      si_pm4_cmd_add(pm4, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
      si_pm4_cmd_add(pm4, 0);
   } else {
      unreachable("invalid chip");
   }

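   /* CONTEXT_CONTROL enables both directions of register shadowing: the
    * CC0_LOAD_* bits make the CP load context, SH and uconfig registers
    * from memory, and the CC1_SHADOW_* bits make it mirror register writes
    * back to memory from this point on.
    */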
   si_pm4_cmd_add(pm4, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
   si_pm4_cmd_add(pm4,
                  CC0_UPDATE_LOAD_ENABLES(1) |
                  CC0_LOAD_PER_CONTEXT_STATE(1) |
                  CC0_LOAD_CS_SH_REGS(1) |
                  CC0_LOAD_GFX_SH_REGS(1) |
                  CC0_LOAD_GLOBAL_UCONFIG(1));
   si_pm4_cmd_add(pm4,
                  CC1_UPDATE_SHADOW_ENABLES(1) |
                  CC1_SHADOW_PER_CONTEXT_STATE(1) |
                  CC1_SHADOW_CS_SH_REGS(1) |
                  CC1_SHADOW_GFX_SH_REGS(1) |
                  CC1_SHADOW_GLOBAL_UCONFIG(1));

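   /* Emit one LOAD_*_REG packet group per shadowed range type (uconfig,
    * context and the SH variants), each pointing at its own slice of the
    * shadow buffer.
    */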
   for (unsigned i = 0; i < SI_NUM_SHADOWED_REG_RANGES; i++)
      si_build_load_reg(sctx->screen, pm4, i, sctx->shadowed_regs);

   return pm4;
}

static void si_set_context_reg_array(struct radeon_cmdbuf *cs, unsigned reg, unsigned num,
                                     const uint32_t *values)
{
   radeon_begin(cs);
   radeon_set_context_reg_seq(reg, num);
   radeon_emit_array(values, num);
   radeon_end();
}

void si_init_cp_reg_shadowing(struct si_context *sctx)
{
   if (sctx->screen->info.mid_command_buffer_preemption_enabled ||
       sctx->screen->debug_flags & DBG(SHADOW_REGS)) {
      sctx->shadowed_regs =
            si_aligned_buffer_create(sctx->b.screen,
                                     PIPE_RESOURCE_FLAG_UNMAPPABLE | SI_RESOURCE_FLAG_DRIVER_INTERNAL,
                                     PIPE_USAGE_DEFAULT,
                                     SI_SHADOWED_REG_BUFFER_SIZE,
                                     4096);
      if (!sctx->shadowed_regs)
         fprintf(stderr, "radeonsi: cannot create a shadowed_regs buffer\n");
   }
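
   /* If the allocation fails, shadowed_regs stays NULL and the rest of this
    * function silently falls back to the non-shadowed path.
    */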

   si_init_cs_preamble_state(sctx, sctx->shadowed_regs != NULL);

   if (sctx->shadowed_regs) {
      /* We need to clear the shadowed reg buffer. */
      si_cp_dma_clear_buffer(sctx, &sctx->gfx_cs, &sctx->shadowed_regs->b.b,
                             0, sctx->shadowed_regs->bo_size, 0, SI_OP_SYNC_AFTER,
                             SI_COHERENCY_CP, L2_BYPASS);

      /* Create the shadowing preamble. */
      struct si_pm4_state *shadowing_preamble =
            si_create_shadowing_ib_preamble(sctx);

      /* Initialize the shadowed registers: emit the shadowing preamble,
       * replay the CLEAR_STATE defaults, then emit the current CS preamble
       * state, so the CP can capture the register values into the shadow
       * buffer.
       */
      radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, sctx->shadowed_regs,
                                RADEON_USAGE_READWRITE | RADEON_PRIO_DESCRIPTORS);
      si_pm4_emit(sctx, shadowing_preamble);
      ac_emulate_clear_state(&sctx->screen->info, &sctx->gfx_cs, si_set_context_reg_array);
      si_pm4_emit(sctx, sctx->cs_preamble_state);

      /* The register values are shadowed, so we won't need to set them again. */
      si_pm4_free_state(sctx, sctx->cs_preamble_state, ~0);
      sctx->cs_preamble_state = NULL;

      si_set_tracked_regs_to_clear_state(sctx);

      /* Set up preemption. The shadowing preamble will be executed as a preamble IB,
       * which will load register values from memory on a context switch.
       */
      sctx->ws->cs_setup_preemption(&sctx->gfx_cs, shadowing_preamble->pm4,
                                    shadowing_preamble->ndw);
      si_pm4_free_state(sctx, shadowing_preamble, ~0);
   }
}