/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_build_pm4.h"
#include "ac_debug.h"
#include "ac_shadowed_regs.h"
#include "util/u_memory.h"

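/* Add a LOAD_CONTEXT/SH/UCONFIG_REG packet to the preamble that tells the CP
 * where in the shadow buffer to reload the register ranges of the given type
 * from on a context switch.
 */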
static void si_build_load_reg(struct si_screen *sscreen, struct si_pm4_state *pm4,
                              enum ac_reg_range_type type,
                              struct si_resource *shadow_regs)
{
   uint64_t gpu_address = shadow_regs->gpu_address;
   unsigned packet, num_ranges, offset;
   const struct ac_reg_range *ranges;

   ac_get_reg_ranges(sscreen->info.chip_class, sscreen->info.family,
                     type, &num_ranges, &ranges);

   switch (type) {
   case SI_REG_RANGE_UCONFIG:
      gpu_address += SI_SHADOWED_UCONFIG_REG_OFFSET;
      offset = CIK_UCONFIG_REG_OFFSET;
      packet = PKT3_LOAD_UCONFIG_REG;
      break;
   case SI_REG_RANGE_CONTEXT:
      gpu_address += SI_SHADOWED_CONTEXT_REG_OFFSET;
      offset = SI_CONTEXT_REG_OFFSET;
      packet = PKT3_LOAD_CONTEXT_REG;
      break;
   default:
      gpu_address += SI_SHADOWED_SH_REG_OFFSET;
      offset = SI_SH_REG_OFFSET;
      packet = PKT3_LOAD_SH_REG;
      break;
   }

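   /* Packet layout: 64-bit shadow buffer address followed by one
    * (register dword offset, dword count) pair per register range.
    */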
   si_pm4_cmd_add(pm4, PKT3(packet, 1 + num_ranges * 2, 0));
   si_pm4_cmd_add(pm4, gpu_address);
   si_pm4_cmd_add(pm4, gpu_address >> 32);
   for (unsigned i = 0; i < num_ranges; i++) {
      si_pm4_cmd_add(pm4, (ranges[i].offset - offset) / 4);
      si_pm4_cmd_add(pm4, ranges[i].size / 4);
   }
}

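/* Build the preamble IB: wait for idle, flush caches, enable register
 * shadowing and loading via CONTEXT_CONTROL, and emit the LOAD packets for
 * all shadowed register ranges.
 */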
static struct si_pm4_state *
si_create_shadowing_ib_preamble(struct si_context *sctx)
{
   struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);

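   /* If draw-time primitive binning is enabled, break the current binning
    * batch before the register state changes below.
    */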
   if (sctx->screen->dpbb_allowed) {
      si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
      si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
   }

   /* Wait for idle, because we'll update VGT ring pointers. */
   si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
   si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));

   /* VGT_FLUSH is required even if VGT is idle. It resets VGT pointers. */
   si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
   si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));

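   /* Flush and invalidate the shader caches and L2 with ACQUIRE_MEM before
    * reprogramming registers; GFX10 uses the GCR encoding, GFX9 the older
    * CP_COHER_CNTL fields.
    */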
   if (sctx->chip_class >= GFX10) {
      unsigned gcr_cntl = S_586_GL2_INV(1) | S_586_GL2_WB(1) |
                          S_586_GLM_INV(1) | S_586_GLM_WB(1) |
                          S_586_GL1_INV(1) | S_586_GLV_INV(1) |
                          S_586_GLK_INV(1) | S_586_GLI_INV(V_586_GLI_ALL);

      si_pm4_cmd_add(pm4, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
      si_pm4_cmd_add(pm4, 0);          /* CP_COHER_CNTL */
      si_pm4_cmd_add(pm4, 0xffffffff); /* CP_COHER_SIZE */
      si_pm4_cmd_add(pm4, 0xffffff);   /* CP_COHER_SIZE_HI */
      si_pm4_cmd_add(pm4, 0);          /* CP_COHER_BASE */
      si_pm4_cmd_add(pm4, 0);          /* CP_COHER_BASE_HI */
      si_pm4_cmd_add(pm4, 0x0000000A); /* POLL_INTERVAL */
      si_pm4_cmd_add(pm4, gcr_cntl);   /* GCR_CNTL */
   } else if (sctx->chip_class == GFX9) {
      unsigned cp_coher_cntl = S_0301F0_SH_ICACHE_ACTION_ENA(1) |
                               S_0301F0_SH_KCACHE_ACTION_ENA(1) |
                               S_0301F0_TC_ACTION_ENA(1) |
                               S_0301F0_TCL1_ACTION_ENA(1) |
                               S_0301F0_TC_WB_ACTION_ENA(1);

      si_pm4_cmd_add(pm4, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
      si_pm4_cmd_add(pm4, cp_coher_cntl); /* CP_COHER_CNTL */
      si_pm4_cmd_add(pm4, 0xffffffff);    /* CP_COHER_SIZE */
      si_pm4_cmd_add(pm4, 0xffffff);      /* CP_COHER_SIZE_HI */
      si_pm4_cmd_add(pm4, 0);             /* CP_COHER_BASE */
      si_pm4_cmd_add(pm4, 0);             /* CP_COHER_BASE_HI */
      si_pm4_cmd_add(pm4, 0x0000000A);    /* POLL_INTERVAL */
   } else {
      unreachable("invalid chip");
   }

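   /* Stall the PFP until the ME has processed the packets above. */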
   si_pm4_cmd_add(pm4, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
   si_pm4_cmd_add(pm4, 0);

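   /* CONTEXT_CONTROL enables both shadowing (the CP writes register state to
    * memory) and loading (the CP restores it from memory) for context, SH and
    * global UCONFIG registers.
    */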
   si_pm4_cmd_add(pm4, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
   si_pm4_cmd_add(pm4,
                  CC0_UPDATE_LOAD_ENABLES(1) |
                  CC0_LOAD_PER_CONTEXT_STATE(1) |
                  CC0_LOAD_CS_SH_REGS(1) |
                  CC0_LOAD_GFX_SH_REGS(1) |
                  CC0_LOAD_GLOBAL_UCONFIG(1));
   si_pm4_cmd_add(pm4,
                  CC1_UPDATE_SHADOW_ENABLES(1) |
                  CC1_SHADOW_PER_CONTEXT_STATE(1) |
                  CC1_SHADOW_CS_SH_REGS(1) |
                  CC1_SHADOW_GFX_SH_REGS(1) |
                  CC1_SHADOW_GLOBAL_UCONFIG(1));

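   /* Append the LOAD packets for every shadowed register range. */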
   for (unsigned i = 0; i < SI_NUM_SHADOWED_REG_RANGES; i++)
      si_build_load_reg(sctx->screen, pm4, i, sctx->shadowed_regs);

   return pm4;
}

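/* Callback for ac_emulate_clear_state: emit "num" consecutive context
 * register values starting at "reg".
 */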
static void si_set_context_reg_array(struct radeon_cmdbuf *cs, unsigned reg, unsigned num,
                                     const uint32_t *values)
{
   radeon_begin(cs);
   radeon_set_context_reg_seq(reg, num);
   radeon_emit_array(values, num);
   radeon_end();
}

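/* Enable CP register shadowing when mid-command-buffer preemption is enabled
 * or the shadow_regs debug flag is set: allocate the shadow buffer, emit the
 * shadowing preamble and the CLEAR_STATE register values once, and hand the
 * preamble to the winsys so it is replayed on preemption.
 */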
void si_init_cp_reg_shadowing(struct si_context *sctx)
{
   if (sctx->screen->info.mid_command_buffer_preemption_enabled ||
       sctx->screen->debug_flags & DBG(SHADOW_REGS)) {
      sctx->shadowed_regs =
         si_aligned_buffer_create(sctx->b.screen,
                                  SI_RESOURCE_FLAG_UNMAPPABLE | SI_RESOURCE_FLAG_DRIVER_INTERNAL,
                                  PIPE_USAGE_DEFAULT,
                                  SI_SHADOWED_REG_BUFFER_SIZE,
                                  4096);
      if (!sctx->shadowed_regs)
         fprintf(stderr, "radeonsi: cannot create a shadowed_regs buffer\n");
   }

   si_init_cs_preamble_state(sctx, sctx->shadowed_regs != NULL);

   if (sctx->shadowed_regs) {
      /* We need to clear the shadowed reg buffer. */
      si_cp_dma_clear_buffer(sctx, &sctx->gfx_cs, &sctx->shadowed_regs->b.b,
                             0, sctx->shadowed_regs->bo_size, 0, SI_OP_SYNC_AFTER,
                             SI_COHERENCY_CP, L2_BYPASS);

      /* Create the shadowing preamble. */
      struct si_pm4_state *shadowing_preamble =
         si_create_shadowing_ib_preamble(sctx);

      /* Initialize the shadowed registers: emit the shadowing preamble, replay
       * the CLEAR_STATE register values, then emit the CS preamble.
       */
      radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, sctx->shadowed_regs,
                                RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
      si_pm4_emit(sctx, shadowing_preamble);
      ac_emulate_clear_state(&sctx->screen->info, &sctx->gfx_cs, si_set_context_reg_array);
      si_pm4_emit(sctx, sctx->cs_preamble_state);

      /* The register values are shadowed, so we won't need to set them again. */
      si_pm4_free_state(sctx, sctx->cs_preamble_state, ~0);
      sctx->cs_preamble_state = NULL;

      si_set_tracked_regs_to_clear_state(sctx);

      /* Set up preemption. The shadowing preamble will be executed as a preamble IB,
       * which will load register values from memory on a context switch.
       */
      sctx->ws->cs_setup_preemption(&sctx->gfx_cs, shadowing_preamble->pm4,
                                    shadowing_preamble->ndw);
      si_pm4_free_state(sctx, shadowing_preamble, ~0);
   }
}