• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2022 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 
25 #include "amdgpu.h"
26 #include "amdgpu_gfx.h"
27 #include "soc15.h"
28 #include "soc15d.h"
29 #include "soc15_common.h"
30 #include "vega10_enum.h"
31 
32 #include "v9_structs.h"
33 
34 #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
35 
36 #include "gc/gc_9_4_3_offset.h"
37 #include "gc/gc_9_4_3_sh_mask.h"
38 
39 #include "gfx_v9_4_3.h"
40 #include "gfx_v9_4_3_cleaner_shader.h"
41 #include "amdgpu_xcp.h"
42 #include "amdgpu_aca.h"
43 
44 MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
45 MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
46 MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
47 MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
48 MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin");
49 MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin");
50 
51 #define GFX9_MEC_HPD_SIZE 4096
52 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
53 
54 #define GOLDEN_GB_ADDR_CONFIG 0x2a114042
55 #define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301
56 
57 #define mmSMNAID_XCD0_MCA_SMU 0x36430400	/* SMN AID XCD0 */
58 #define mmSMNAID_XCD1_MCA_SMU 0x38430400	/* SMN AID XCD1 */
59 #define mmSMNXCD_XCD0_MCA_SMU 0x40430400	/* SMN XCD XCD0 */
60 
61 #define XCC_REG_RANGE_0_LOW  0x2000     /* XCC gfxdec0 lower Bound */
62 #define XCC_REG_RANGE_0_HIGH 0x3400     /* XCC gfxdec0 upper Bound */
63 #define XCC_REG_RANGE_1_LOW  0xA000     /* XCC gfxdec1 lower Bound */
64 #define XCC_REG_RANGE_1_HIGH 0x10000    /* XCC gfxdec1 upper Bound */
65 
66 #define NORMALIZE_XCC_REG_OFFSET(offset) \
67 	(offset & 0xFFFF)
68 
69 static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
70 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
71 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
72 	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
73 	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
74 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
75 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
76 	SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
77 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
78 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
79 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
80 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
81 	SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
82 	SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
83 	SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
84 	SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
85 	SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
86 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
87 	SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
88 	SOC15_REG_ENTRY_STR(GC, 0, regSQC_DCACHE_UTCL1_STATUS),
89 	SOC15_REG_ENTRY_STR(GC, 0, regSQC_ICACHE_UTCL1_STATUS),
90 	SOC15_REG_ENTRY_STR(GC, 0, regSQ_UTCL1_STATUS),
91 	SOC15_REG_ENTRY_STR(GC, 0, regTCP_UTCL1_STATUS),
92 	SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
93 	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL),
94 	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS),
95 	SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
96 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
97 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
98 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC2_INSTR_PNTR),
99 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
100 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_STAT),
101 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_COMMAND),
102 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_MESSAGE),
103 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_1),
104 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_2),
105 	SOC15_REG_ENTRY_STR(GC, 0, regSMU_RLC_RESPONSE),
106 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SAFE_MODE),
107 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
108 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
109 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
110 	/* cp header registers */
111 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
112 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME2_HEADER_DUMP),
113 	/* SE status registers */
114 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
115 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
116 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
117 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
118 };
119 
120 static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
121 	/* compute queue registers */
122 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
123 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ACTIVE),
124 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
125 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
126 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
127 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
128 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
129 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
130 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
131 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
132 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
133 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
134 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
135 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
136 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
137 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
138 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
139 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
140 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
141 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
142 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
143 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
144 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
145 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
146 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
147 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
148 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
149 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
150 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
151 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
152 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
153 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
154 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
155 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
156 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
157 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
158 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
159 };
160 
161 struct amdgpu_gfx_ras gfx_v9_4_3_ras;
162 
163 static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
164 static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
165 static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
166 static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
167 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
168 				struct amdgpu_cu_info *cu_info);
169 static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
170 static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
171 
gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring * kiq_ring,uint64_t queue_mask)172 static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
173 				uint64_t queue_mask)
174 {
175 	struct amdgpu_device *adev = kiq_ring->adev;
176 	u64 shader_mc_addr;
177 
178 	/* Cleaner shader MC address */
179 	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;
180 
181 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
182 	amdgpu_ring_write(kiq_ring,
183 		PACKET3_SET_RESOURCES_VMID_MASK(0) |
184 		/* vmid_mask:0* queue_type:0 (KIQ) */
185 		PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
186 	amdgpu_ring_write(kiq_ring,
187 			lower_32_bits(queue_mask));	/* queue mask lo */
188 	amdgpu_ring_write(kiq_ring,
189 			upper_32_bits(queue_mask));	/* queue mask hi */
190 	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
191 	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
192 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
193 	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
194 }
195 
gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring * kiq_ring,struct amdgpu_ring * ring)196 static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
197 				 struct amdgpu_ring *ring)
198 {
199 	struct amdgpu_device *adev = kiq_ring->adev;
200 	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
201 	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
202 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
203 
204 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
205 	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
206 	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
207 			 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
208 			 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
209 			 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
210 			 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
211 			 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
212 			 /*queue_type: normal compute queue */
213 			 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
214 			 /* alloc format: all_on_one_pipe */
215 			 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
216 			 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
217 			 /* num_queues: must be 1 */
218 			 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
219 	amdgpu_ring_write(kiq_ring,
220 			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
221 	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
222 	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
223 	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
224 	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
225 }
226 
gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring * kiq_ring,struct amdgpu_ring * ring,enum amdgpu_unmap_queues_action action,u64 gpu_addr,u64 seq)227 static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
228 				   struct amdgpu_ring *ring,
229 				   enum amdgpu_unmap_queues_action action,
230 				   u64 gpu_addr, u64 seq)
231 {
232 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
233 
234 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
235 	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
236 			  PACKET3_UNMAP_QUEUES_ACTION(action) |
237 			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
238 			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
239 			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
240 	amdgpu_ring_write(kiq_ring,
241 			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
242 
243 	if (action == PREEMPT_QUEUES_NO_UNMAP) {
244 		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
245 		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
246 		amdgpu_ring_write(kiq_ring, seq);
247 	} else {
248 		amdgpu_ring_write(kiq_ring, 0);
249 		amdgpu_ring_write(kiq_ring, 0);
250 		amdgpu_ring_write(kiq_ring, 0);
251 	}
252 }
253 
gfx_v9_4_3_kiq_query_status(struct amdgpu_ring * kiq_ring,struct amdgpu_ring * ring,u64 addr,u64 seq)254 static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
255 				   struct amdgpu_ring *ring,
256 				   u64 addr,
257 				   u64 seq)
258 {
259 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
260 
261 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
262 	amdgpu_ring_write(kiq_ring,
263 			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
264 			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
265 			  PACKET3_QUERY_STATUS_COMMAND(2));
266 	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
267 	amdgpu_ring_write(kiq_ring,
268 			PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
269 			PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
270 	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
271 	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
272 	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
273 	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
274 }
275 
gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring * kiq_ring,uint16_t pasid,uint32_t flush_type,bool all_hub)276 static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
277 				uint16_t pasid, uint32_t flush_type,
278 				bool all_hub)
279 {
280 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
281 	amdgpu_ring_write(kiq_ring,
282 			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
283 			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
284 			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
285 			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
286 }
287 
gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring * kiq_ring,uint32_t queue_type,uint32_t me_id,uint32_t pipe_id,uint32_t queue_id,uint32_t xcc_id,uint32_t vmid)288 static void gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
289 					  uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
290 					  uint32_t xcc_id, uint32_t vmid)
291 {
292 	struct amdgpu_device *adev = kiq_ring->adev;
293 	unsigned i;
294 
295 	/* enter save mode */
296 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
297 	mutex_lock(&adev->srbm_mutex);
298 	soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, xcc_id);
299 
300 	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
301 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0x2);
302 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_COMPUTE_QUEUE_RESET, 0x1);
303 		/* wait till dequeue take effects */
304 		for (i = 0; i < adev->usec_timeout; i++) {
305 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
306 				break;
307 			udelay(1);
308 		}
309 		if (i >= adev->usec_timeout)
310 			dev_err(adev->dev, "fail to wait on hqd deactive\n");
311 	} else {
312 		dev_err(adev->dev, "reset queue_type(%d) not supported\n\n", queue_type);
313 	}
314 
315 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
316 	mutex_unlock(&adev->srbm_mutex);
317 	/* exit safe mode */
318 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
319 }
320 
321 static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
322 	.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
323 	.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
324 	.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
325 	.kiq_query_status = gfx_v9_4_3_kiq_query_status,
326 	.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
327 	.kiq_reset_hw_queue = gfx_v9_4_3_kiq_reset_hw_queue,
328 	.set_resources_size = 8,
329 	.map_queues_size = 7,
330 	.unmap_queues_size = 6,
331 	.query_status_size = 7,
332 	.invalidate_tlbs_size = 2,
333 };
334 
gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device * adev)335 static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
336 {
337 	int i, num_xcc;
338 
339 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
340 	for (i = 0; i < num_xcc; i++)
341 		adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
342 }
343 
gfx_v9_4_3_init_golden_registers(struct amdgpu_device * adev)344 static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
345 {
346 	int i, num_xcc, dev_inst;
347 
348 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
349 	for (i = 0; i < num_xcc; i++) {
350 		dev_inst = GET_INST(GC, i);
351 
352 		WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
353 			     GOLDEN_GB_ADDR_CONFIG);
354 		/* Golden settings applied by driver for ASIC with rev_id 0 */
355 		if (adev->rev_id == 0) {
356 			WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
357 					      REDUCE_FIFO_DEPTH_BY_2, 2);
358 		} else {
359 			WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
360 						SPARE, 0x1);
361 		}
362 	}
363 }
364 
gfx_v9_4_3_normalize_xcc_reg_offset(uint32_t reg)365 static uint32_t gfx_v9_4_3_normalize_xcc_reg_offset(uint32_t reg)
366 {
367 	uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg);
368 
369 	/* If it is an XCC reg, normalize the reg to keep
370 	   lower 16 bits in local xcc */
371 
372 	if (((normalized_reg >= XCC_REG_RANGE_0_LOW) && (normalized_reg < XCC_REG_RANGE_0_HIGH)) ||
373 		((normalized_reg >= XCC_REG_RANGE_1_LOW) && (normalized_reg < XCC_REG_RANGE_1_HIGH)))
374 		return normalized_reg;
375 	else
376 		return reg;
377 }
378 
gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring * ring,int eng_sel,bool wc,uint32_t reg,uint32_t val)379 static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
380 				       bool wc, uint32_t reg, uint32_t val)
381 {
382 	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
383 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
384 	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
385 				WRITE_DATA_DST_SEL(0) |
386 				(wc ? WR_CONFIRM : 0));
387 	amdgpu_ring_write(ring, reg);
388 	amdgpu_ring_write(ring, 0);
389 	amdgpu_ring_write(ring, val);
390 }
391 
gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring * ring,int eng_sel,int mem_space,int opt,uint32_t addr0,uint32_t addr1,uint32_t ref,uint32_t mask,uint32_t inv)392 static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
393 				  int mem_space, int opt, uint32_t addr0,
394 				  uint32_t addr1, uint32_t ref, uint32_t mask,
395 				  uint32_t inv)
396 {
397 	/* Only do the normalization on regspace */
398 	if (mem_space == 0) {
399 		addr0 = gfx_v9_4_3_normalize_xcc_reg_offset(addr0);
400 		addr1 = gfx_v9_4_3_normalize_xcc_reg_offset(addr1);
401 	}
402 
403 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
404 	amdgpu_ring_write(ring,
405 				 /* memory (1) or register (0) */
406 				 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
407 				 WAIT_REG_MEM_OPERATION(opt) | /* wait */
408 				 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
409 				 WAIT_REG_MEM_ENGINE(eng_sel)));
410 
411 	if (mem_space)
412 		BUG_ON(addr0 & 0x3); /* Dword align */
413 	amdgpu_ring_write(ring, addr0);
414 	amdgpu_ring_write(ring, addr1);
415 	amdgpu_ring_write(ring, ref);
416 	amdgpu_ring_write(ring, mask);
417 	amdgpu_ring_write(ring, inv); /* poll interval */
418 }
419 
gfx_v9_4_3_ring_test_ring(struct amdgpu_ring * ring)420 static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
421 {
422 	uint32_t scratch_reg0_offset, xcc_offset;
423 	struct amdgpu_device *adev = ring->adev;
424 	uint32_t tmp = 0;
425 	unsigned i;
426 	int r;
427 
428 	/* Use register offset which is local to XCC in the packet */
429 	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
430 	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
431 	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
432 	tmp = RREG32(scratch_reg0_offset);
433 
434 	r = amdgpu_ring_alloc(ring, 3);
435 	if (r)
436 		return r;
437 
438 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
439 	amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
440 	amdgpu_ring_write(ring, 0xDEADBEEF);
441 	amdgpu_ring_commit(ring);
442 
443 	for (i = 0; i < adev->usec_timeout; i++) {
444 		tmp = RREG32(scratch_reg0_offset);
445 		if (tmp == 0xDEADBEEF)
446 			break;
447 		udelay(1);
448 	}
449 
450 	if (i >= adev->usec_timeout)
451 		r = -ETIMEDOUT;
452 	return r;
453 }
454 
gfx_v9_4_3_ring_test_ib(struct amdgpu_ring * ring,long timeout)455 static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
456 {
457 	struct amdgpu_device *adev = ring->adev;
458 	struct amdgpu_ib ib;
459 	struct dma_fence *f = NULL;
460 
461 	unsigned index;
462 	uint64_t gpu_addr;
463 	uint32_t tmp;
464 	long r;
465 
466 	r = amdgpu_device_wb_get(adev, &index);
467 	if (r)
468 		return r;
469 
470 	gpu_addr = adev->wb.gpu_addr + (index * 4);
471 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
472 	memset(&ib, 0, sizeof(ib));
473 
474 	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
475 	if (r)
476 		goto err1;
477 
478 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
479 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
480 	ib.ptr[2] = lower_32_bits(gpu_addr);
481 	ib.ptr[3] = upper_32_bits(gpu_addr);
482 	ib.ptr[4] = 0xDEADBEEF;
483 	ib.length_dw = 5;
484 
485 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
486 	if (r)
487 		goto err2;
488 
489 	r = dma_fence_wait_timeout(f, false, timeout);
490 	if (r == 0) {
491 		r = -ETIMEDOUT;
492 		goto err2;
493 	} else if (r < 0) {
494 		goto err2;
495 	}
496 
497 	tmp = adev->wb.wb[index];
498 	if (tmp == 0xDEADBEEF)
499 		r = 0;
500 	else
501 		r = -EINVAL;
502 
503 err2:
504 	amdgpu_ib_free(adev, &ib, NULL);
505 	dma_fence_put(f);
506 err1:
507 	amdgpu_device_wb_free(adev, index);
508 	return r;
509 }
510 
511 
512 /* This value might differs per partition */
gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device * adev)513 static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
514 {
515 	uint64_t clock;
516 
517 	mutex_lock(&adev->gfx.gpu_clock_mutex);
518 	WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
519 	clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
520 		((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
521 	mutex_unlock(&adev->gfx.gpu_clock_mutex);
522 
523 	return clock;
524 }
525 
gfx_v9_4_3_free_microcode(struct amdgpu_device * adev)526 static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
527 {
528 	amdgpu_ucode_release(&adev->gfx.pfp_fw);
529 	amdgpu_ucode_release(&adev->gfx.me_fw);
530 	amdgpu_ucode_release(&adev->gfx.ce_fw);
531 	amdgpu_ucode_release(&adev->gfx.rlc_fw);
532 	amdgpu_ucode_release(&adev->gfx.mec_fw);
533 	amdgpu_ucode_release(&adev->gfx.mec2_fw);
534 
535 	kfree(adev->gfx.rlc.register_list_format);
536 }
537 
gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device * adev,const char * chip_name)538 static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
539 					  const char *chip_name)
540 {
541 	int err;
542 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
543 	uint16_t version_major;
544 	uint16_t version_minor;
545 
546 
547 	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
548 				   "amdgpu/%s_rlc.bin", chip_name);
549 	if (err)
550 		goto out;
551 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
552 
553 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
554 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
555 	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
556 out:
557 	if (err)
558 		amdgpu_ucode_release(&adev->gfx.rlc_fw);
559 
560 	return err;
561 }
562 
gfx_v9_4_3_should_disable_gfxoff(struct pci_dev * pdev)563 static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev)
564 {
565 	return true;
566 }
567 
gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device * adev)568 static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
569 {
570 	if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev))
571 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
572 }
573 
gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device * adev,const char * chip_name)574 static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
575 					  const char *chip_name)
576 {
577 	int err;
578 
579 	if (amdgpu_sriov_vf(adev))
580 		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
581 				"amdgpu/%s_sjt_mec.bin", chip_name);
582 	else
583 		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
584 				"amdgpu/%s_mec.bin", chip_name);
585 	if (err)
586 		goto out;
587 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
588 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
589 
590 	adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
591 	adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
592 
593 	gfx_v9_4_3_check_if_need_gfxoff(adev);
594 
595 out:
596 	if (err)
597 		amdgpu_ucode_release(&adev->gfx.mec_fw);
598 	return err;
599 }
600 
gfx_v9_4_3_init_microcode(struct amdgpu_device * adev)601 static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
602 {
603 	char ucode_prefix[15];
604 	int r;
605 
606 	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
607 
608 	r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
609 	if (r)
610 		return r;
611 
612 	r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
613 	if (r)
614 		return r;
615 
616 	return r;
617 }
618 
gfx_v9_4_3_mec_fini(struct amdgpu_device * adev)619 static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
620 {
621 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
622 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
623 }
624 
gfx_v9_4_3_mec_init(struct amdgpu_device * adev)625 static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
626 {
627 	int r, i, num_xcc;
628 	u32 *hpd;
629 	const __le32 *fw_data;
630 	unsigned fw_size;
631 	u32 *fw;
632 	size_t mec_hpd_size;
633 
634 	const struct gfx_firmware_header_v1_0 *mec_hdr;
635 
636 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
637 	for (i = 0; i < num_xcc; i++)
638 		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
639 			AMDGPU_MAX_COMPUTE_QUEUES);
640 
641 	/* take ownership of the relevant compute queues */
642 	amdgpu_gfx_compute_queue_acquire(adev);
643 	mec_hpd_size =
644 		adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
645 	if (mec_hpd_size) {
646 		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
647 					      AMDGPU_GEM_DOMAIN_VRAM |
648 					      AMDGPU_GEM_DOMAIN_GTT,
649 					      &adev->gfx.mec.hpd_eop_obj,
650 					      &adev->gfx.mec.hpd_eop_gpu_addr,
651 					      (void **)&hpd);
652 		if (r) {
653 			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
654 			gfx_v9_4_3_mec_fini(adev);
655 			return r;
656 		}
657 
658 		if (amdgpu_emu_mode == 1) {
659 			for (i = 0; i < mec_hpd_size / 4; i++) {
660 				memset((void *)(hpd + i), 0, 4);
661 				if (i % 50 == 0)
662 					msleep(1);
663 			}
664 		} else {
665 			memset(hpd, 0, mec_hpd_size);
666 		}
667 
668 		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
669 		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
670 	}
671 
672 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
673 
674 	fw_data = (const __le32 *)
675 		(adev->gfx.mec_fw->data +
676 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
677 	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
678 
679 	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
680 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
681 				      &adev->gfx.mec.mec_fw_obj,
682 				      &adev->gfx.mec.mec_fw_gpu_addr,
683 				      (void **)&fw);
684 	if (r) {
685 		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
686 		gfx_v9_4_3_mec_fini(adev);
687 		return r;
688 	}
689 
690 	memcpy(fw, fw_data, fw_size);
691 
692 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
693 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
694 
695 	return 0;
696 }
697 
gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device * adev,u32 se_num,u32 sh_num,u32 instance,int xcc_id)698 static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
699 					u32 sh_num, u32 instance, int xcc_id)
700 {
701 	u32 data;
702 
703 	if (instance == 0xffffffff)
704 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
705 				     INSTANCE_BROADCAST_WRITES, 1);
706 	else
707 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
708 				     INSTANCE_INDEX, instance);
709 
710 	if (se_num == 0xffffffff)
711 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
712 				     SE_BROADCAST_WRITES, 1);
713 	else
714 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
715 
716 	if (sh_num == 0xffffffff)
717 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
718 				     SH_BROADCAST_WRITES, 1);
719 	else
720 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
721 
722 	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
723 }
724 
wave_read_ind(struct amdgpu_device * adev,uint32_t xcc_id,uint32_t simd,uint32_t wave,uint32_t address)725 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address)
726 {
727 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
728 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
729 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
730 		(address << SQ_IND_INDEX__INDEX__SHIFT) |
731 		(SQ_IND_INDEX__FORCE_READ_MASK));
732 	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
733 }
734 
wave_read_regs(struct amdgpu_device * adev,uint32_t xcc_id,uint32_t simd,uint32_t wave,uint32_t thread,uint32_t regno,uint32_t num,uint32_t * out)735 static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
736 			   uint32_t wave, uint32_t thread,
737 			   uint32_t regno, uint32_t num, uint32_t *out)
738 {
739 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
740 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
741 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
742 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
743 		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
744 		(SQ_IND_INDEX__FORCE_READ_MASK) |
745 		(SQ_IND_INDEX__AUTO_INCR_MASK));
746 	while (num--)
747 		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
748 }
749 
gfx_v9_4_3_read_wave_data(struct amdgpu_device * adev,uint32_t xcc_id,uint32_t simd,uint32_t wave,uint32_t * dst,int * no_fields)750 static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
751 				      uint32_t xcc_id, uint32_t simd, uint32_t wave,
752 				      uint32_t *dst, int *no_fields)
753 {
754 	/* type 1 wave data */
755 	dst[(*no_fields)++] = 1;
756 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
757 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
758 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
759 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
760 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
761 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
762 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
763 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
764 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
765 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
766 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
767 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
768 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
769 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
770 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
771 }
772 
gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device * adev,uint32_t xcc_id,uint32_t simd,uint32_t wave,uint32_t start,uint32_t size,uint32_t * dst)773 static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
774 				       uint32_t wave, uint32_t start,
775 				       uint32_t size, uint32_t *dst)
776 {
777 	wave_read_regs(adev, xcc_id, simd, wave, 0,
778 		       start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
779 }
780 
gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device * adev,uint32_t xcc_id,uint32_t simd,uint32_t wave,uint32_t thread,uint32_t start,uint32_t size,uint32_t * dst)781 static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
782 				       uint32_t wave, uint32_t thread,
783 				       uint32_t start, uint32_t size,
784 				       uint32_t *dst)
785 {
786 	wave_read_regs(adev, xcc_id, simd, wave, thread,
787 		       start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
788 }
789 
gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device * adev,u32 me,u32 pipe,u32 q,u32 vm,u32 xcc_id)790 static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
791 					u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
792 {
793 	soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
794 }
795 
gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device * adev)796 static int gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device *adev)
797 {
798 	u32 xcp_ctl;
799 
800 	/* Value is expected to be the same on all, fetch from first instance */
801 	xcp_ctl = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HYP_XCP_CTL);
802 
803 	return REG_GET_FIELD(xcp_ctl, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP);
804 }
805 
gfx_v9_4_3_switch_compute_partition(struct amdgpu_device * adev,int num_xccs_per_xcp)806 static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
807 						int num_xccs_per_xcp)
808 {
809 	int ret, i, num_xcc;
810 	u32 tmp = 0;
811 
812 	if (adev->psp.funcs) {
813 		ret = psp_spatial_partition(&adev->psp,
814 					    NUM_XCC(adev->gfx.xcc_mask) /
815 						    num_xccs_per_xcp);
816 		if (ret)
817 			return ret;
818 	} else {
819 		num_xcc = NUM_XCC(adev->gfx.xcc_mask);
820 
821 		for (i = 0; i < num_xcc; i++) {
822 			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
823 					    num_xccs_per_xcp);
824 			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
825 					    i % num_xccs_per_xcp);
826 			WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
827 				     tmp);
828 		}
829 		ret = 0;
830 	}
831 
832 	adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;
833 
834 	return ret;
835 }
836 
gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device * adev,int ih_node)837 static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
838 {
839 	int xcc;
840 
841 	xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
842 	if (!xcc) {
843 		dev_err(adev->dev, "Couldn't find xcc mapping from IH node");
844 		return -EINVAL;
845 	}
846 
847 	return xcc - 1;
848 }
849 
850 static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
851 	.get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
852 	.select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
853 	.read_wave_data = &gfx_v9_4_3_read_wave_data,
854 	.read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
855 	.read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
856 	.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
857 	.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
858 	.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
859 	.get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
860 };
861 
gfx_v9_4_3_aca_bank_parser(struct aca_handle * handle,struct aca_bank * bank,enum aca_smu_type type,void * data)862 static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
863 				      struct aca_bank *bank, enum aca_smu_type type,
864 				      void *data)
865 {
866 	struct aca_bank_info info;
867 	u64 misc0;
868 	u32 instlo;
869 	int ret;
870 
871 	ret = aca_bank_info_decode(bank, &info);
872 	if (ret)
873 		return ret;
874 
875 	/* NOTE: overwrite info.die_id with xcd id for gfx */
876 	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
877 	instlo &= GENMASK(31, 1);
878 	info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;
879 
880 	misc0 = bank->regs[ACA_REG_IDX_MISC0];
881 
882 	switch (type) {
883 	case ACA_SMU_TYPE_UE:
884 		ret = aca_error_cache_log_bank_error(handle, &info,
885 						     ACA_ERROR_TYPE_UE, 1ULL);
886 		break;
887 	case ACA_SMU_TYPE_CE:
888 		ret = aca_error_cache_log_bank_error(handle, &info,
889 						     ACA_ERROR_TYPE_CE, ACA_REG__MISC0__ERRCNT(misc0));
890 		break;
891 	default:
892 		return -EINVAL;
893 	}
894 
895 	return ret;
896 }
897 
gfx_v9_4_3_aca_bank_is_valid(struct aca_handle * handle,struct aca_bank * bank,enum aca_smu_type type,void * data)898 static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
899 					 enum aca_smu_type type, void *data)
900 {
901 	u32 instlo;
902 
903 	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
904 	instlo &= GENMASK(31, 1);
905 	switch (instlo) {
906 	case mmSMNAID_XCD0_MCA_SMU:
907 	case mmSMNAID_XCD1_MCA_SMU:
908 	case mmSMNXCD_XCD0_MCA_SMU:
909 		return true;
910 	default:
911 		break;
912 	}
913 
914 	return false;
915 }
916 
917 static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
918 	.aca_bank_parser = gfx_v9_4_3_aca_bank_parser,
919 	.aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
920 };
921 
922 static const struct aca_info gfx_v9_4_3_aca_info = {
923 	.hwip = ACA_HWIP_TYPE_SMU,
924 	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
925 	.bank_ops = &gfx_v9_4_3_aca_bank_ops,
926 };
927 
gfx_v9_4_3_gpu_early_init(struct amdgpu_device * adev)928 static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
929 {
930 	u32 gb_addr_config;
931 
932 	adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
933 	adev->gfx.ras = &gfx_v9_4_3_ras;
934 
935 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
936 	case IP_VERSION(9, 4, 3):
937 	case IP_VERSION(9, 4, 4):
938 		adev->gfx.config.max_hw_contexts = 8;
939 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
940 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
941 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
942 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
943 		gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG);
944 		break;
945 	default:
946 		BUG();
947 		break;
948 	}
949 
950 	adev->gfx.config.gb_addr_config = gb_addr_config;
951 
952 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
953 			REG_GET_FIELD(
954 					adev->gfx.config.gb_addr_config,
955 					GB_ADDR_CONFIG,
956 					NUM_PIPES);
957 
958 	adev->gfx.config.max_tile_pipes =
959 		adev->gfx.config.gb_addr_config_fields.num_pipes;
960 
961 	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
962 			REG_GET_FIELD(
963 					adev->gfx.config.gb_addr_config,
964 					GB_ADDR_CONFIG,
965 					NUM_BANKS);
966 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
967 			REG_GET_FIELD(
968 					adev->gfx.config.gb_addr_config,
969 					GB_ADDR_CONFIG,
970 					MAX_COMPRESSED_FRAGS);
971 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
972 			REG_GET_FIELD(
973 					adev->gfx.config.gb_addr_config,
974 					GB_ADDR_CONFIG,
975 					NUM_RB_PER_SE);
976 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
977 			REG_GET_FIELD(
978 					adev->gfx.config.gb_addr_config,
979 					GB_ADDR_CONFIG,
980 					NUM_SHADER_ENGINES);
981 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
982 			REG_GET_FIELD(
983 					adev->gfx.config.gb_addr_config,
984 					GB_ADDR_CONFIG,
985 					PIPE_INTERLEAVE_SIZE));
986 
987 	return 0;
988 }
989 
gfx_v9_4_3_compute_ring_init(struct amdgpu_device * adev,int ring_id,int xcc_id,int mec,int pipe,int queue)990 static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
991 				        int xcc_id, int mec, int pipe, int queue)
992 {
993 	unsigned irq_type;
994 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
995 	unsigned int hw_prio;
996 	uint32_t xcc_doorbell_start;
997 
998 	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
999 				       ring_id];
1000 
1001 	/* mec0 is me1 */
1002 	ring->xcc_id = xcc_id;
1003 	ring->me = mec + 1;
1004 	ring->pipe = pipe;
1005 	ring->queue = queue;
1006 
1007 	ring->ring_obj = NULL;
1008 	ring->use_doorbell = true;
1009 	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
1010 			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
1011 	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
1012 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
1013 			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
1014 				     GFX9_MEC_HPD_SIZE;
1015 	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
1016 	sprintf(ring->name, "comp_%d.%d.%d.%d",
1017 			ring->xcc_id, ring->me, ring->pipe, ring->queue);
1018 
1019 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1020 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1021 		+ ring->pipe;
1022 	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
1023 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
1024 	/* type-2 packets are deprecated on MEC, use type-3 instead */
1025 	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
1026 				hw_prio, NULL);
1027 }
1028 
gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device * adev)1029 static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev)
1030 {
1031 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
1032 	uint32_t *ptr, num_xcc, inst;
1033 
1034 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1035 
1036 	ptr = kcalloc(reg_count * num_xcc, sizeof(uint32_t), GFP_KERNEL);
1037 	if (!ptr) {
1038 		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
1039 		adev->gfx.ip_dump_core = NULL;
1040 	} else {
1041 		adev->gfx.ip_dump_core = ptr;
1042 	}
1043 
1044 	/* Allocate memory for compute queue registers for all the instances */
1045 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
1046 	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
1047 		adev->gfx.mec.num_queue_per_pipe;
1048 
1049 	ptr = kcalloc(reg_count * inst * num_xcc, sizeof(uint32_t), GFP_KERNEL);
1050 	if (!ptr) {
1051 		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
1052 		adev->gfx.ip_dump_compute_queues = NULL;
1053 	} else {
1054 		adev->gfx.ip_dump_compute_queues = ptr;
1055 	}
1056 }
1057 
gfx_v9_4_3_sw_init(void * handle)1058 static int gfx_v9_4_3_sw_init(void *handle)
1059 {
1060 	int i, j, k, r, ring_id, xcc_id, num_xcc;
1061 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1062 
1063 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1064 	case IP_VERSION(9, 4, 3):
1065 	case IP_VERSION(9, 4, 4):
1066 		adev->gfx.cleaner_shader_ptr = gfx_9_4_3_cleaner_shader_hex;
1067 		adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_3_cleaner_shader_hex);
1068 		if (adev->gfx.mec_fw_version >= 153) {
1069 			adev->gfx.enable_cleaner_shader = true;
1070 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
1071 			if (r) {
1072 				adev->gfx.enable_cleaner_shader = false;
1073 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
1074 			}
1075 		}
1076 		break;
1077 	default:
1078 		adev->gfx.enable_cleaner_shader = false;
1079 		break;
1080 	}
1081 
1082 	adev->gfx.mec.num_mec = 2;
1083 	adev->gfx.mec.num_pipe_per_mec = 4;
1084 	adev->gfx.mec.num_queue_per_pipe = 8;
1085 
1086 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1087 
1088 	/* EOP Event */
1089 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
1090 	if (r)
1091 		return r;
1092 
1093 	/* Bad opcode Event */
1094 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
1095 			      GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
1096 			      &adev->gfx.bad_op_irq);
1097 	if (r)
1098 		return r;
1099 
1100 	/* Privileged reg */
1101 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
1102 			      &adev->gfx.priv_reg_irq);
1103 	if (r)
1104 		return r;
1105 
1106 	/* Privileged inst */
1107 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
1108 			      &adev->gfx.priv_inst_irq);
1109 	if (r)
1110 		return r;
1111 
1112 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1113 
1114 	r = adev->gfx.rlc.funcs->init(adev);
1115 	if (r) {
1116 		DRM_ERROR("Failed to init rlc BOs!\n");
1117 		return r;
1118 	}
1119 
1120 	r = gfx_v9_4_3_mec_init(adev);
1121 	if (r) {
1122 		DRM_ERROR("Failed to init MEC BOs!\n");
1123 		return r;
1124 	}
1125 
1126 	/* set up the compute queues - allocate horizontally across pipes */
1127 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1128 		ring_id = 0;
1129 		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1130 			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1131 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
1132 				     k++) {
1133 					if (!amdgpu_gfx_is_mec_queue_enabled(
1134 							adev, xcc_id, i, k, j))
1135 						continue;
1136 
1137 					r = gfx_v9_4_3_compute_ring_init(adev,
1138 								       ring_id,
1139 								       xcc_id,
1140 								       i, k, j);
1141 					if (r)
1142 						return r;
1143 
1144 					ring_id++;
1145 				}
1146 			}
1147 		}
1148 
1149 		r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
1150 		if (r) {
1151 			DRM_ERROR("Failed to init KIQ BOs!\n");
1152 			return r;
1153 		}
1154 
1155 		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
1156 		if (r)
1157 			return r;
1158 
1159 		/* create MQD for all compute queues as wel as KIQ for SRIOV case */
1160 		r = amdgpu_gfx_mqd_sw_init(adev,
1161 				sizeof(struct v9_mqd_allocation), xcc_id);
1162 		if (r)
1163 			return r;
1164 	}
1165 
1166 	r = gfx_v9_4_3_gpu_early_init(adev);
1167 	if (r)
1168 		return r;
1169 
1170 	r = amdgpu_gfx_ras_sw_init(adev);
1171 	if (r)
1172 		return r;
1173 
1174 
1175 	if (!amdgpu_sriov_vf(adev)) {
1176 		r = amdgpu_gfx_sysfs_init(adev);
1177 		if (r)
1178 			return r;
1179 	}
1180 
1181 	gfx_v9_4_3_alloc_ip_dump(adev);
1182 
1183 	r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
1184 	if (r)
1185 		return r;
1186 
1187 	return 0;
1188 }
1189 
gfx_v9_4_3_sw_fini(void * handle)1190 static int gfx_v9_4_3_sw_fini(void *handle)
1191 {
1192 	int i, num_xcc;
1193 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1194 
1195 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1196 	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
1197 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1198 
1199 	for (i = 0; i < num_xcc; i++) {
1200 		amdgpu_gfx_mqd_sw_fini(adev, i);
1201 		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
1202 		amdgpu_gfx_kiq_fini(adev, i);
1203 	}
1204 
1205 	amdgpu_gfx_cleaner_shader_sw_fini(adev);
1206 
1207 	gfx_v9_4_3_mec_fini(adev);
1208 	amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
1209 	gfx_v9_4_3_free_microcode(adev);
1210 	if (!amdgpu_sriov_vf(adev))
1211 		amdgpu_gfx_sysfs_fini(adev);
1212 	amdgpu_gfx_sysfs_isolation_shader_fini(adev);
1213 
1214 	kfree(adev->gfx.ip_dump_core);
1215 	kfree(adev->gfx.ip_dump_compute_queues);
1216 
1217 	return 0;
1218 }
1219 
1220 #define DEFAULT_SH_MEM_BASES	(0x6000)
gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device * adev,int xcc_id)1221 static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
1222 					     int xcc_id)
1223 {
1224 	int i;
1225 	uint32_t sh_mem_config;
1226 	uint32_t sh_mem_bases;
1227 	uint32_t data;
1228 
1229 	/*
1230 	 * Configure apertures:
1231 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1232 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1233 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
1234 	 */
1235 	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1236 
1237 	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
1238 			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1239 			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1240 
1241 	mutex_lock(&adev->srbm_mutex);
1242 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1243 		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
1244 		/* CP and shaders */
1245 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
1246 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);
1247 
1248 		/* Enable trap for each kfd vmid. */
1249 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
1250 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
1251 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
1252 	}
1253 	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1254 	mutex_unlock(&adev->srbm_mutex);
1255 
1256 	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
1257 	   acccess. These should be enabled by FW for target VMIDs. */
1258 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1259 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
1260 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
1261 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
1262 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
1263 	}
1264 }
1265 
gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device * adev,int xcc_id)1266 static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
1267 {
1268 	int vmid;
1269 
1270 	/*
1271 	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
1272 	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
1273 	 * the driver can enable them for graphics. VMID0 should maintain
1274 	 * access so that HWS firmware can save/restore entries.
1275 	 */
1276 	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
1277 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
1278 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
1279 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
1280 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
1281 	}
1282 }
1283 
gfx_v9_4_3_xcc_constants_init(struct amdgpu_device * adev,int xcc_id)1284 static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
1285 					  int xcc_id)
1286 {
1287 	u32 tmp;
1288 	int i;
1289 
1290 	/* XXX SH_MEM regs */
1291 	/* where to put LDS, scratch, GPUVM in FSA64 space */
1292 	mutex_lock(&adev->srbm_mutex);
1293 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
1294 		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
1295 		/* CP and shaders */
1296 		if (i == 0) {
1297 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1298 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1299 			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
1300 					    !!adev->gmc.noretry);
1301 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
1302 					 regSH_MEM_CONFIG, tmp);
1303 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
1304 					 regSH_MEM_BASES, 0);
1305 		} else {
1306 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1307 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1308 			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
1309 					    !!adev->gmc.noretry);
1310 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
1311 					 regSH_MEM_CONFIG, tmp);
1312 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1313 					    (adev->gmc.private_aperture_start >>
1314 					     48));
1315 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1316 					    (adev->gmc.shared_aperture_start >>
1317 					     48));
1318 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
1319 					 regSH_MEM_BASES, tmp);
1320 		}
1321 	}
1322 	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));
1323 
1324 	mutex_unlock(&adev->srbm_mutex);
1325 
1326 	gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
1327 	gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
1328 }
1329 
gfx_v9_4_3_constants_init(struct amdgpu_device * adev)1330 static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
1331 {
1332 	int i, num_xcc;
1333 
1334 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1335 
1336 	gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
1337 	adev->gfx.config.db_debug2 =
1338 		RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);
1339 
1340 	for (i = 0; i < num_xcc; i++)
1341 		gfx_v9_4_3_xcc_constants_init(adev, i);
1342 }
1343 
1344 static void
gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device * adev,int xcc_id)1345 gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
1346 					   int xcc_id)
1347 {
1348 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
1349 }
1350 
gfx_v9_4_3_xcc_init_pg(struct amdgpu_device * adev,int xcc_id)1351 static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
1352 {
1353 	/*
1354 	 * Rlc save restore list is workable since v2_1.
1355 	 * And it's needed by gfxoff feature.
1356 	 */
1357 	if (adev->gfx.rlc.is_rlc_v2_1)
1358 		gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
1359 }
1360 
gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device * adev,int xcc_id)1361 static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
1362 {
1363 	uint32_t data;
1364 
1365 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
1366 	data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
1367 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
1368 }
1369 
gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device * adev)1370 static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
1371 {
1372 	uint32_t rlc_setting;
1373 
1374 	/* if RLC is not enabled, do nothing */
1375 	rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
1376 	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
1377 		return false;
1378 
1379 	return true;
1380 }
1381 
gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device * adev,int xcc_id)1382 static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
1383 {
1384 	uint32_t data;
1385 	unsigned i;
1386 
1387 	data = RLC_SAFE_MODE__CMD_MASK;
1388 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
1389 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
1390 
1391 	/* wait for RLC_SAFE_MODE */
1392 	for (i = 0; i < adev->usec_timeout; i++) {
1393 		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
1394 			break;
1395 		udelay(1);
1396 	}
1397 }
1398 
gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device * adev,int xcc_id)1399 static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
1400 					   int xcc_id)
1401 {
1402 	uint32_t data;
1403 
1404 	data = RLC_SAFE_MODE__CMD_MASK;
1405 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
1406 }
1407 
gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device * adev)1408 static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
1409 {
1410 	int xcc_id, num_xcc;
1411 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
1412 
1413 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1414 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1415 		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
1416 		reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
1417 		reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
1418 		reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
1419 		reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
1420 		reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
1421 		reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
1422 		reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
1423 	}
1424 	adev->gfx.rlc.rlcg_reg_access_supported = true;
1425 }
1426 
gfx_v9_4_3_rlc_init(struct amdgpu_device * adev)1427 static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
1428 {
1429 	/* init spm vmid with 0xf */
1430 	if (adev->gfx.rlc.funcs->update_spm_vmid)
1431 		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
1432 
1433 	return 0;
1434 }
1435 
gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device * adev,int xcc_id)1436 static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
1437 					       int xcc_id)
1438 {
1439 	u32 i, j, k;
1440 	u32 mask;
1441 
1442 	mutex_lock(&adev->grbm_idx_mutex);
1443 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1444 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1445 			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
1446 						    xcc_id);
1447 			for (k = 0; k < adev->usec_timeout; k++) {
1448 				if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
1449 					break;
1450 				udelay(1);
1451 			}
1452 			if (k == adev->usec_timeout) {
1453 				gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
1454 							    0xffffffff,
1455 							    0xffffffff, xcc_id);
1456 				mutex_unlock(&adev->grbm_idx_mutex);
1457 				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
1458 					 i, j);
1459 				return;
1460 			}
1461 		}
1462 	}
1463 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
1464 				    xcc_id);
1465 	mutex_unlock(&adev->grbm_idx_mutex);
1466 
1467 	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
1468 		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
1469 		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
1470 		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
1471 	for (k = 0; k < adev->usec_timeout; k++) {
1472 		if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
1473 			break;
1474 		udelay(1);
1475 	}
1476 }
1477 
gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device * adev,bool enable,int xcc_id)1478 static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1479 						     bool enable, int xcc_id)
1480 {
1481 	u32 tmp;
1482 
1483 	/* These interrupts should be enabled to drive DS clock */
1484 
1485 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);
1486 
1487 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
1488 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
1489 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
1490 
1491 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
1492 }
1493 
gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device * adev,int xcc_id)1494 static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
1495 {
1496 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
1497 			      RLC_ENABLE_F32, 0);
1498 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
1499 	gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
1500 }
1501 
gfx_v9_4_3_rlc_stop(struct amdgpu_device * adev)1502 static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
1503 {
1504 	int i, num_xcc;
1505 
1506 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1507 	for (i = 0; i < num_xcc; i++)
1508 		gfx_v9_4_3_xcc_rlc_stop(adev, i);
1509 }
1510 
gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device * adev,int xcc_id)1511 static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
1512 {
1513 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
1514 			      SOFT_RESET_RLC, 1);
1515 	udelay(50);
1516 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
1517 			      SOFT_RESET_RLC, 0);
1518 	udelay(50);
1519 }
1520 
gfx_v9_4_3_rlc_reset(struct amdgpu_device * adev)1521 static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
1522 {
1523 	int i, num_xcc;
1524 
1525 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1526 	for (i = 0; i < num_xcc; i++)
1527 		gfx_v9_4_3_xcc_rlc_reset(adev, i);
1528 }
1529 
gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device * adev,int xcc_id)1530 static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
1531 {
1532 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
1533 			      RLC_ENABLE_F32, 1);
1534 	udelay(50);
1535 
1536 	/* carrizo do enable cp interrupt after cp inited */
1537 	if (!(adev->flags & AMD_IS_APU)) {
1538 		gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
1539 		udelay(50);
1540 	}
1541 }
1542 
gfx_v9_4_3_rlc_start(struct amdgpu_device * adev)1543 static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
1544 {
1545 #ifdef AMDGPU_RLC_DEBUG_RETRY
1546 	u32 rlc_ucode_ver;
1547 #endif
1548 	int i, num_xcc;
1549 
1550 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1551 	for (i = 0; i < num_xcc; i++) {
1552 		gfx_v9_4_3_xcc_rlc_start(adev, i);
1553 #ifdef AMDGPU_RLC_DEBUG_RETRY
1554 		/* RLC_GPM_GENERAL_6 : RLC Ucode version */
1555 		rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
1556 		if (rlc_ucode_ver == 0x108) {
1557 			dev_info(adev->dev,
1558 				 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n",
1559 				 rlc_ucode_ver, adev->gfx.rlc_fw_version);
1560 			/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
1561 			 * default is 0x9C4 to create a 100us interval */
1562 			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
1563 			/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
1564 			 * to disable the page fault retry interrupts, default is
1565 			 * 0x100 (256) */
1566 			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
1567 		}
1568 #endif
1569 	}
1570 }
1571 
gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device * adev,int xcc_id)1572 static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
1573 					     int xcc_id)
1574 {
1575 	const struct rlc_firmware_header_v2_0 *hdr;
1576 	const __le32 *fw_data;
1577 	unsigned i, fw_size;
1578 
1579 	if (!adev->gfx.rlc_fw)
1580 		return -EINVAL;
1581 
1582 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1583 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
1584 
1585 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1586 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1587 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1588 
1589 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
1590 			RLCG_UCODE_LOADING_START_ADDRESS);
1591 	for (i = 0; i < fw_size; i++) {
1592 		if (amdgpu_emu_mode == 1 && i % 100 == 0) {
1593 			dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
1594 			msleep(1);
1595 		}
1596 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
1597 	}
1598 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
1599 
1600 	return 0;
1601 }
1602 
gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device * adev,int xcc_id)1603 static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
1604 {
1605 	int r;
1606 
1607 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1608 		gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
1609 		/* legacy rlc firmware loading */
1610 		r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
1611 		if (r)
1612 			return r;
1613 		gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
1614 	}
1615 
1616 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
1617 	/* disable CG */
1618 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
1619 	gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
1620 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
1621 
1622 	return 0;
1623 }
1624 
gfx_v9_4_3_rlc_resume(struct amdgpu_device * adev)1625 static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
1626 {
1627 	int r, i, num_xcc;
1628 
1629 	if (amdgpu_sriov_vf(adev))
1630 		return 0;
1631 
1632 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1633 	for (i = 0; i < num_xcc; i++) {
1634 		r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
1635 		if (r)
1636 			return r;
1637 	}
1638 
1639 	return 0;
1640 }
1641 
gfx_v9_4_3_update_spm_vmid(struct amdgpu_device * adev,struct amdgpu_ring * ring,unsigned vmid)1642 static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring,
1643 				       unsigned vmid)
1644 {
1645 	u32 reg, pre_data, data;
1646 
1647 	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL);
1648 	if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
1649 		pre_data = RREG32_NO_KIQ(reg);
1650 	else
1651 		pre_data = RREG32(reg);
1652 
1653 	data =	pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK);
1654 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
1655 
1656 	if (pre_data != data) {
1657 		if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev)) {
1658 			WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
1659 		} else
1660 			WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
1661 	}
1662 }
1663 
1664 static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
1665 	{SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)},
1666 	{SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)},
1667 };
1668 
gfx_v9_4_3_check_rlcg_range(struct amdgpu_device * adev,uint32_t offset,struct soc15_reg_rlcg * entries,int arr_size)1669 static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
1670 					uint32_t offset,
1671 					struct soc15_reg_rlcg *entries, int arr_size)
1672 {
1673 	int i, inst;
1674 	uint32_t reg;
1675 
1676 	if (!entries)
1677 		return false;
1678 
1679 	for (i = 0; i < arr_size; i++) {
1680 		const struct soc15_reg_rlcg *entry;
1681 
1682 		entry = &entries[i];
1683 		inst = adev->ip_map.logical_to_dev_inst ?
1684 			       adev->ip_map.logical_to_dev_inst(
1685 				       adev, entry->hwip, entry->instance) :
1686 			       entry->instance;
1687 		reg = adev->reg_offset[entry->hwip][inst][entry->segment] +
1688 		      entry->reg;
1689 		if (offset == reg)
1690 			return true;
1691 	}
1692 
1693 	return false;
1694 }
1695 
gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device * adev,u32 offset)1696 static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
1697 {
1698 	return gfx_v9_4_3_check_rlcg_range(adev, offset,
1699 					(void *)rlcg_access_gc_9_4_3,
1700 					ARRAY_SIZE(rlcg_access_gc_9_4_3));
1701 }
1702 
gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device * adev,bool enable,int xcc_id)1703 static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev,
1704 					     bool enable, int xcc_id)
1705 {
1706 	if (enable) {
1707 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0);
1708 	} else {
1709 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL,
1710 			(CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK |
1711 			 CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK |
1712 			 CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK |
1713 			 CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK |
1714 			 CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK |
1715 			 CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK |
1716 			 CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK |
1717 			 CP_MEC_CNTL__MEC_ME1_HALT_MASK |
1718 			 CP_MEC_CNTL__MEC_ME2_HALT_MASK));
1719 		adev->gfx.kiq[xcc_id].ring.sched.ready = false;
1720 	}
1721 	udelay(50);
1722 }
1723 
gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device * adev,int xcc_id)1724 static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev,
1725 						    int xcc_id)
1726 {
1727 	const struct gfx_firmware_header_v1_0 *mec_hdr;
1728 	const __le32 *fw_data;
1729 	unsigned i;
1730 	u32 tmp;
1731 	u32 mec_ucode_addr_offset;
1732 	u32 mec_ucode_data_offset;
1733 
1734 	if (!adev->gfx.mec_fw)
1735 		return -EINVAL;
1736 
1737 	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
1738 
1739 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1740 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
1741 
1742 	fw_data = (const __le32 *)
1743 		(adev->gfx.mec_fw->data +
1744 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1745 	tmp = 0;
1746 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
1747 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
1748 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);
1749 
1750 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
1751 		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
1752 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
1753 		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
1754 
1755 	mec_ucode_addr_offset =
1756 		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR);
1757 	mec_ucode_data_offset =
1758 		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA);
1759 
1760 	/* MEC1 */
1761 	WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset);
1762 	for (i = 0; i < mec_hdr->jt_size; i++)
1763 		WREG32(mec_ucode_data_offset,
1764 		       le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
1765 
1766 	WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version);
1767 	/* Todo : Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
1768 
1769 	return 0;
1770 }
1771 
1772 /* KIQ functions */
gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring * ring,int xcc_id)1773 static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
1774 {
1775 	uint32_t tmp;
1776 	struct amdgpu_device *adev = ring->adev;
1777 
1778 	/* tell RLC which is KIQ queue */
1779 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
1780 	tmp &= 0xffffff00;
1781 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
1782 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
1783 	tmp |= 0x80;
1784 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
1785 }
1786 
gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring * ring,struct v9_mqd * mqd)1787 static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
1788 {
1789 	struct amdgpu_device *adev = ring->adev;
1790 
1791 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
1792 		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
1793 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
1794 			mqd->cp_hqd_queue_priority =
1795 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
1796 		}
1797 	}
1798 }
1799 
gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring * ring,int xcc_id)1800 static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id)
1801 {
1802 	struct amdgpu_device *adev = ring->adev;
1803 	struct v9_mqd *mqd = ring->mqd_ptr;
1804 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
1805 	uint32_t tmp;
1806 
1807 	mqd->header = 0xC0310800;
1808 	mqd->compute_pipelinestat_enable = 0x00000001;
1809 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
1810 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
1811 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
1812 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
1813 	mqd->compute_misc_reserved = 0x00000003;
1814 
1815 	mqd->dynamic_cu_mask_addr_lo =
1816 		lower_32_bits(ring->mqd_gpu_addr
1817 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1818 	mqd->dynamic_cu_mask_addr_hi =
1819 		upper_32_bits(ring->mqd_gpu_addr
1820 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1821 
1822 	eop_base_addr = ring->eop_gpu_addr >> 8;
1823 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
1824 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
1825 
1826 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1827 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL);
1828 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
1829 			(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
1830 
1831 	mqd->cp_hqd_eop_control = tmp;
1832 
1833 	/* enable doorbell? */
1834 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL);
1835 
1836 	if (ring->use_doorbell) {
1837 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1838 				    DOORBELL_OFFSET, ring->doorbell_index);
1839 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1840 				    DOORBELL_EN, 1);
1841 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1842 				    DOORBELL_SOURCE, 0);
1843 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1844 				    DOORBELL_HIT, 0);
1845 		if (amdgpu_sriov_vf(adev))
1846 			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1847 					    DOORBELL_MODE, 1);
1848 	} else {
1849 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1850 					 DOORBELL_EN, 0);
1851 	}
1852 
1853 	mqd->cp_hqd_pq_doorbell_control = tmp;
1854 
1855 	/* disable the queue if it's active */
1856 	ring->wptr = 0;
1857 	mqd->cp_hqd_dequeue_request = 0;
1858 	mqd->cp_hqd_pq_rptr = 0;
1859 	mqd->cp_hqd_pq_wptr_lo = 0;
1860 	mqd->cp_hqd_pq_wptr_hi = 0;
1861 
1862 	/* set the pointer to the MQD */
1863 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
1864 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
1865 
1866 	/* set MQD vmid to 0 */
1867 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL);
1868 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
1869 	mqd->cp_mqd_control = tmp;
1870 
1871 	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
1872 	hqd_gpu_addr = ring->gpu_addr >> 8;
1873 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
1874 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
1875 
1876 	/* set up the HQD, this is similar to CP_RB0_CNTL */
1877 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL);
1878 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
1879 			    (order_base_2(ring->ring_size / 4) - 1));
1880 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
1881 			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
1882 #ifdef __BIG_ENDIAN
1883 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
1884 #endif
1885 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
1886 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
1887 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
1888 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
1889 	mqd->cp_hqd_pq_control = tmp;
1890 
1891 	/* set the wb address whether it's enabled or not */
1892 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
1893 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
1894 	mqd->cp_hqd_pq_rptr_report_addr_hi =
1895 		upper_32_bits(wb_gpu_addr) & 0xffff;
1896 
1897 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
1898 	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
1899 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
1900 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
1901 
1902 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
1903 	ring->wptr = 0;
1904 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR);
1905 
1906 	/* set the vmid for the queue */
1907 	mqd->cp_hqd_vmid = 0;
1908 
1909 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE);
1910 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
1911 	mqd->cp_hqd_persistent_state = tmp;
1912 
1913 	/* set MIN_IB_AVAIL_SIZE */
1914 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL);
1915 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
1916 	mqd->cp_hqd_ib_control = tmp;
1917 
1918 	/* set static priority for a queue/ring */
1919 	gfx_v9_4_3_mqd_set_priority(ring, mqd);
1920 	mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM);
1921 
1922 	/* map_queues packet doesn't need activate the queue,
1923 	 * so only kiq need set this field.
1924 	 */
1925 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
1926 		mqd->cp_hqd_active = 1;
1927 
1928 	return 0;
1929 }
1930 
gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring * ring,int xcc_id)1931 static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring,
1932 					    int xcc_id)
1933 {
1934 	struct amdgpu_device *adev = ring->adev;
1935 	struct v9_mqd *mqd = ring->mqd_ptr;
1936 	int j;
1937 
1938 	/* disable wptr polling */
1939 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
1940 
1941 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
1942 	       mqd->cp_hqd_eop_base_addr_lo);
1943 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
1944 	       mqd->cp_hqd_eop_base_addr_hi);
1945 
1946 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1947 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
1948 	       mqd->cp_hqd_eop_control);
1949 
1950 	/* enable doorbell? */
1951 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
1952 	       mqd->cp_hqd_pq_doorbell_control);
1953 
1954 	/* disable the queue if it's active */
1955 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
1956 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
1957 		for (j = 0; j < adev->usec_timeout; j++) {
1958 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
1959 				break;
1960 			udelay(1);
1961 		}
1962 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
1963 		       mqd->cp_hqd_dequeue_request);
1964 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
1965 		       mqd->cp_hqd_pq_rptr);
1966 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
1967 		       mqd->cp_hqd_pq_wptr_lo);
1968 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
1969 		       mqd->cp_hqd_pq_wptr_hi);
1970 	}
1971 
1972 	/* set the pointer to the MQD */
1973 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
1974 	       mqd->cp_mqd_base_addr_lo);
1975 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
1976 	       mqd->cp_mqd_base_addr_hi);
1977 
1978 	/* set MQD vmid to 0 */
1979 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
1980 	       mqd->cp_mqd_control);
1981 
1982 	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
1983 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
1984 	       mqd->cp_hqd_pq_base_lo);
1985 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
1986 	       mqd->cp_hqd_pq_base_hi);
1987 
1988 	/* set up the HQD, this is similar to CP_RB0_CNTL */
1989 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
1990 	       mqd->cp_hqd_pq_control);
1991 
1992 	/* set the wb address whether it's enabled or not */
1993 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
1994 				mqd->cp_hqd_pq_rptr_report_addr_lo);
1995 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
1996 				mqd->cp_hqd_pq_rptr_report_addr_hi);
1997 
1998 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
1999 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
2000 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
2001 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2002 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
2003 
2004 	/* enable the doorbell if requested */
2005 	if (ring->use_doorbell) {
2006 		WREG32_SOC15(
2007 			GC, GET_INST(GC, xcc_id),
2008 			regCP_MEC_DOORBELL_RANGE_LOWER,
2009 			((adev->doorbell_index.kiq +
2010 			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2011 			 2) << 2);
2012 		WREG32_SOC15(
2013 			GC, GET_INST(GC, xcc_id),
2014 			regCP_MEC_DOORBELL_RANGE_UPPER,
2015 			((adev->doorbell_index.userqueue_end +
2016 			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2017 			 2) << 2);
2018 	}
2019 
2020 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2021 	       mqd->cp_hqd_pq_doorbell_control);
2022 
2023 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2024 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2025 	       mqd->cp_hqd_pq_wptr_lo);
2026 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2027 	       mqd->cp_hqd_pq_wptr_hi);
2028 
2029 	/* set the vmid for the queue */
2030 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
2031 
2032 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
2033 	       mqd->cp_hqd_persistent_state);
2034 
2035 	/* activate the queue */
2036 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
2037 	       mqd->cp_hqd_active);
2038 
2039 	if (ring->use_doorbell)
2040 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2041 
2042 	return 0;
2043 }
2044 
gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring * ring,int xcc_id)2045 static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
2046 					    int xcc_id)
2047 {
2048 	struct amdgpu_device *adev = ring->adev;
2049 	int j;
2050 
2051 	/* disable the queue if it's active */
2052 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
2053 
2054 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
2055 
2056 		for (j = 0; j < adev->usec_timeout; j++) {
2057 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
2058 				break;
2059 			udelay(1);
2060 		}
2061 
2062 		if (j == AMDGPU_MAX_USEC_TIMEOUT) {
2063 			DRM_DEBUG("%s dequeue request failed.\n", ring->name);
2064 
2065 			/* Manual disable if dequeue request times out */
2066 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
2067 		}
2068 
2069 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
2070 		      0);
2071 	}
2072 
2073 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
2074 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
2075 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);
2076 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
2077 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
2078 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
2079 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0);
2080 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0);
2081 
2082 	return 0;
2083 }
2084 
gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring * ring,int xcc_id)2085 static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
2086 {
2087 	struct amdgpu_device *adev = ring->adev;
2088 	struct v9_mqd *mqd = ring->mqd_ptr;
2089 	struct v9_mqd *tmp_mqd;
2090 
2091 	gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id);
2092 
2093 	/* GPU could be in bad state during probe, driver trigger the reset
2094 	 * after load the SMU, in this case , the mqd is not be initialized.
2095 	 * driver need to re-init the mqd.
2096 	 * check mqd->cp_hqd_pq_control since this value should not be 0
2097 	 */
2098 	tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup;
2099 	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
2100 		/* for GPU_RESET case , reset MQD to a clean status */
2101 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2102 			memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation));
2103 
2104 		/* reset ring buffer */
2105 		ring->wptr = 0;
2106 		amdgpu_ring_clear_ring(ring);
2107 		mutex_lock(&adev->srbm_mutex);
2108 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2109 		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
2110 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2111 		mutex_unlock(&adev->srbm_mutex);
2112 	} else {
2113 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2114 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2115 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2116 		mutex_lock(&adev->srbm_mutex);
2117 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
2118 			amdgpu_ring_clear_ring(ring);
2119 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2120 		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
2121 		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
2122 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2123 		mutex_unlock(&adev->srbm_mutex);
2124 
2125 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2126 			memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
2127 	}
2128 
2129 	return 0;
2130 }
2131 
gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring * ring,int xcc_id,bool restore)2132 static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore)
2133 {
2134 	struct amdgpu_device *adev = ring->adev;
2135 	struct v9_mqd *mqd = ring->mqd_ptr;
2136 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
2137 	struct v9_mqd *tmp_mqd;
2138 
2139 	/* Same as above kiq init, driver need to re-init the mqd if mqd->cp_hqd_pq_control
2140 	 * is not be initialized before
2141 	 */
2142 	tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
2143 
2144 	if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
2145 	    (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
2146 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2147 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2148 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2149 		mutex_lock(&adev->srbm_mutex);
2150 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2151 		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
2152 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2153 		mutex_unlock(&adev->srbm_mutex);
2154 
2155 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2156 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
2157 	} else {
2158 		/* restore MQD to a clean status */
2159 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2160 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
2161 		/* reset ring buffer */
2162 		ring->wptr = 0;
2163 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
2164 		amdgpu_ring_clear_ring(ring);
2165 	}
2166 
2167 	return 0;
2168 }
2169 
gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device * adev,int xcc_id)2170 static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
2171 {
2172 	struct amdgpu_ring *ring;
2173 	int j;
2174 
2175 	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
2176 		ring = &adev->gfx.compute_ring[j +  xcc_id * adev->gfx.num_compute_rings];
2177 		if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2178 			mutex_lock(&adev->srbm_mutex);
2179 			soc15_grbm_select(adev, ring->me,
2180 					ring->pipe,
2181 					ring->queue, 0, GET_INST(GC, xcc_id));
2182 			gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id);
2183 			soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2184 			mutex_unlock(&adev->srbm_mutex);
2185 		}
2186 	}
2187 
2188 	return 0;
2189 }
2190 
gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device * adev,int xcc_id)2191 static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
2192 {
2193 	struct amdgpu_ring *ring;
2194 	int r;
2195 
2196 	ring = &adev->gfx.kiq[xcc_id].ring;
2197 
2198 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
2199 	if (unlikely(r != 0))
2200 		return r;
2201 
2202 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2203 	if (unlikely(r != 0)) {
2204 		amdgpu_bo_unreserve(ring->mqd_obj);
2205 		return r;
2206 	}
2207 
2208 	gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id);
2209 	amdgpu_bo_kunmap(ring->mqd_obj);
2210 	ring->mqd_ptr = NULL;
2211 	amdgpu_bo_unreserve(ring->mqd_obj);
2212 	return 0;
2213 }
2214 
gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device * adev,int xcc_id)2215 static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
2216 {
2217 	struct amdgpu_ring *ring = NULL;
2218 	int r = 0, i;
2219 
2220 	gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
2221 
2222 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2223 		ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
2224 
2225 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
2226 		if (unlikely(r != 0))
2227 			goto done;
2228 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2229 		if (!r) {
2230 			r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
2231 			amdgpu_bo_kunmap(ring->mqd_obj);
2232 			ring->mqd_ptr = NULL;
2233 		}
2234 		amdgpu_bo_unreserve(ring->mqd_obj);
2235 		if (r)
2236 			goto done;
2237 	}
2238 
2239 	r = amdgpu_gfx_enable_kcq(adev, xcc_id);
2240 done:
2241 	return r;
2242 }
2243 
gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device * adev,int xcc_id)2244 static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
2245 {
2246 	struct amdgpu_ring *ring;
2247 	int r, j;
2248 
2249 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
2250 
2251 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2252 		gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id);
2253 
2254 		r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id);
2255 		if (r)
2256 			return r;
2257 	} else {
2258 		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
2259 	}
2260 
2261 	r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
2262 	if (r)
2263 		return r;
2264 
2265 	r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id);
2266 	if (r)
2267 		return r;
2268 
2269 	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
2270 		ring = &adev->gfx.compute_ring
2271 				[j + xcc_id * adev->gfx.num_compute_rings];
2272 		r = amdgpu_ring_test_helper(ring);
2273 		if (r)
2274 			return r;
2275 	}
2276 
2277 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
2278 
2279 	return 0;
2280 }
2281 
gfx_v9_4_3_cp_resume(struct amdgpu_device * adev)2282 static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
2283 {
2284 	int r = 0, i, num_xcc, num_xcp, num_xcc_per_xcp;
2285 
2286 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2287 	if (amdgpu_sriov_vf(adev)) {
2288 		enum amdgpu_gfx_partition mode;
2289 
2290 		mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2291 						       AMDGPU_XCP_FL_NONE);
2292 		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2293 			return -EINVAL;
2294 		num_xcc_per_xcp = gfx_v9_4_3_get_xccs_per_xcp(adev);
2295 		adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp;
2296 		num_xcp = num_xcc / num_xcc_per_xcp;
2297 		r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
2298 
2299 	} else {
2300 		if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2301 						    AMDGPU_XCP_FL_NONE) ==
2302 		    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2303 			r = amdgpu_xcp_switch_partition_mode(
2304 				adev->xcp_mgr, amdgpu_user_partt_mode);
2305 	}
2306 	if (r)
2307 		return r;
2308 
2309 	for (i = 0; i < num_xcc; i++) {
2310 		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
2311 		if (r)
2312 			return r;
2313 	}
2314 
2315 	return 0;
2316 }
2317 
gfx_v9_4_3_xcc_fini(struct amdgpu_device * adev,int xcc_id)2318 static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
2319 {
2320 	if (amdgpu_gfx_disable_kcq(adev, xcc_id))
2321 		DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id);
2322 
2323 	if (amdgpu_sriov_vf(adev)) {
2324 		/* must disable polling for SRIOV when hw finished, otherwise
2325 		 * CPC engine may still keep fetching WB address which is already
2326 		 * invalid after sw finished and trigger DMAR reading error in
2327 		 * hypervisor side.
2328 		 */
2329 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
2330 		return;
2331 	}
2332 
2333 	/* Use deinitialize sequence from CAIL when unbinding device
2334 	 * from driver, otherwise KIQ is hanging when binding back
2335 	 */
2336 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2337 		mutex_lock(&adev->srbm_mutex);
2338 		soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me,
2339 				  adev->gfx.kiq[xcc_id].ring.pipe,
2340 				  adev->gfx.kiq[xcc_id].ring.queue, 0,
2341 				  GET_INST(GC, xcc_id));
2342 		gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring,
2343 						 xcc_id);
2344 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2345 		mutex_unlock(&adev->srbm_mutex);
2346 	}
2347 
2348 	gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id);
2349 	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
2350 }
2351 
gfx_v9_4_3_hw_init(void * handle)2352 static int gfx_v9_4_3_hw_init(void *handle)
2353 {
2354 	int r;
2355 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2356 
2357 	amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
2358 				       adev->gfx.cleaner_shader_ptr);
2359 
2360 	if (!amdgpu_sriov_vf(adev))
2361 		gfx_v9_4_3_init_golden_registers(adev);
2362 
2363 	gfx_v9_4_3_constants_init(adev);
2364 
2365 	r = adev->gfx.rlc.funcs->resume(adev);
2366 	if (r)
2367 		return r;
2368 
2369 	r = gfx_v9_4_3_cp_resume(adev);
2370 	if (r)
2371 		return r;
2372 
2373 	return r;
2374 }
2375 
gfx_v9_4_3_hw_fini(void * handle)2376 static int gfx_v9_4_3_hw_fini(void *handle)
2377 {
2378 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2379 	int i, num_xcc;
2380 
2381 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2382 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2383 	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
2384 
2385 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2386 	for (i = 0; i < num_xcc; i++) {
2387 		gfx_v9_4_3_xcc_fini(adev, i);
2388 	}
2389 
2390 	return 0;
2391 }
2392 
gfx_v9_4_3_suspend(void * handle)2393 static int gfx_v9_4_3_suspend(void *handle)
2394 {
2395 	return gfx_v9_4_3_hw_fini(handle);
2396 }
2397 
gfx_v9_4_3_resume(void * handle)2398 static int gfx_v9_4_3_resume(void *handle)
2399 {
2400 	return gfx_v9_4_3_hw_init(handle);
2401 }
2402 
gfx_v9_4_3_is_idle(void * handle)2403 static bool gfx_v9_4_3_is_idle(void *handle)
2404 {
2405 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2406 	int i, num_xcc;
2407 
2408 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2409 	for (i = 0; i < num_xcc; i++) {
2410 		if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS),
2411 					GRBM_STATUS, GUI_ACTIVE))
2412 			return false;
2413 	}
2414 	return true;
2415 }
2416 
gfx_v9_4_3_wait_for_idle(void * handle)2417 static int gfx_v9_4_3_wait_for_idle(void *handle)
2418 {
2419 	unsigned i;
2420 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2421 
2422 	for (i = 0; i < adev->usec_timeout; i++) {
2423 		if (gfx_v9_4_3_is_idle(handle))
2424 			return 0;
2425 		udelay(1);
2426 	}
2427 	return -ETIMEDOUT;
2428 }
2429 
gfx_v9_4_3_soft_reset(void * handle)2430 static int gfx_v9_4_3_soft_reset(void *handle)
2431 {
2432 	u32 grbm_soft_reset = 0;
2433 	u32 tmp;
2434 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2435 
2436 	/* GRBM_STATUS */
2437 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS);
2438 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
2439 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
2440 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
2441 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
2442 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
2443 		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
2444 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2445 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2446 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2447 						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
2448 	}
2449 
2450 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
2451 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2452 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2453 	}
2454 
2455 	/* GRBM_STATUS2 */
2456 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2);
2457 	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
2458 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2459 						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2460 
2461 
2462 	if (grbm_soft_reset) {
2463 		/* stop the rlc */
2464 		adev->gfx.rlc.funcs->stop(adev);
2465 
2466 		/* Disable MEC parsing/prefetching */
2467 		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
2468 
2469 		if (grbm_soft_reset) {
2470 			tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2471 			tmp |= grbm_soft_reset;
2472 			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
2473 			WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2474 			tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2475 
2476 			udelay(50);
2477 
2478 			tmp &= ~grbm_soft_reset;
2479 			WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2480 			tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2481 		}
2482 
2483 		/* Wait a little for things to settle down */
2484 		udelay(50);
2485 	}
2486 	return 0;
2487 }
2488 
gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring * ring,uint32_t vmid,uint32_t gds_base,uint32_t gds_size,uint32_t gws_base,uint32_t gws_size,uint32_t oa_base,uint32_t oa_size)2489 static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring,
2490 					  uint32_t vmid,
2491 					  uint32_t gds_base, uint32_t gds_size,
2492 					  uint32_t gws_base, uint32_t gws_size,
2493 					  uint32_t oa_base, uint32_t oa_size)
2494 {
2495 	struct amdgpu_device *adev = ring->adev;
2496 
2497 	/* GDS Base */
2498 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2499 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid,
2500 				   gds_base);
2501 
2502 	/* GDS Size */
2503 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2504 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid,
2505 				   gds_size);
2506 
2507 	/* GWS */
2508 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2509 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid,
2510 				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
2511 
2512 	/* OA */
2513 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2514 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid,
2515 				   (1 << (oa_size + oa_base)) - (1 << oa_base));
2516 }
2517 
gfx_v9_4_3_early_init(void * handle)2518 static int gfx_v9_4_3_early_init(void *handle)
2519 {
2520 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2521 
2522 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
2523 					  AMDGPU_MAX_COMPUTE_RINGS);
2524 	gfx_v9_4_3_set_kiq_pm4_funcs(adev);
2525 	gfx_v9_4_3_set_ring_funcs(adev);
2526 	gfx_v9_4_3_set_irq_funcs(adev);
2527 	gfx_v9_4_3_set_gds_init(adev);
2528 	gfx_v9_4_3_set_rlc_funcs(adev);
2529 
2530 	/* init rlcg reg access ctrl */
2531 	gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev);
2532 
2533 	return gfx_v9_4_3_init_microcode(adev);
2534 }
2535 
gfx_v9_4_3_late_init(void * handle)2536 static int gfx_v9_4_3_late_init(void *handle)
2537 {
2538 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2539 	int r;
2540 
2541 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2542 	if (r)
2543 		return r;
2544 
2545 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2546 	if (r)
2547 		return r;
2548 
2549 	r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
2550 	if (r)
2551 		return r;
2552 
2553 	if (adev->gfx.ras &&
2554 	    adev->gfx.ras->enable_watchdog_timer)
2555 		adev->gfx.ras->enable_watchdog_timer(adev);
2556 
2557 	return 0;
2558 }
2559 
gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device * adev,bool enable,int xcc_id)2560 static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
2561 					    bool enable, int xcc_id)
2562 {
2563 	uint32_t def, data;
2564 
2565 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
2566 		return;
2567 
2568 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2569 				  regRLC_CGTT_MGCG_OVERRIDE);
2570 
2571 	if (enable)
2572 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2573 	else
2574 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2575 
2576 	if (def != data)
2577 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2578 			     regRLC_CGTT_MGCG_OVERRIDE, data);
2579 
2580 }
2581 
gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device * adev,bool enable,int xcc_id)2582 static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
2583 						bool enable, int xcc_id)
2584 {
2585 	uint32_t def, data;
2586 
2587 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
2588 		return;
2589 
2590 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2591 				  regRLC_CGTT_MGCG_OVERRIDE);
2592 
2593 	if (enable)
2594 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2595 	else
2596 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2597 
2598 	if (def != data)
2599 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2600 			     regRLC_CGTT_MGCG_OVERRIDE, data);
2601 }
2602 
2603 static void
gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device * adev,bool enable,int xcc_id)2604 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
2605 						bool enable, int xcc_id)
2606 {
2607 	uint32_t data, def;
2608 
2609 	/* It is disabled by HW by default */
2610 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
2611 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
2612 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2613 
2614 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2615 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2616 			  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2617 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2618 
2619 		if (def != data)
2620 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2621 
2622 		/* MGLS is a global flag to control all MGLS in GFX */
2623 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
2624 			/* 2 - RLC memory Light sleep */
2625 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
2626 				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2627 				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2628 				if (def != data)
2629 					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2630 			}
2631 			/* 3 - CP memory Light sleep */
2632 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
2633 				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2634 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2635 				if (def != data)
2636 					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2637 			}
2638 		}
2639 	} else {
2640 		/* 1 - MGCG_OVERRIDE */
2641 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2642 
2643 		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2644 			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2645 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2646 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2647 
2648 		if (def != data)
2649 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2650 
2651 		/* 2 - disable MGLS in RLC */
2652 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2653 		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
2654 			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2655 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2656 		}
2657 
2658 		/* 3 - disable MGLS in CP */
2659 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2660 		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
2661 			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2662 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2663 		}
2664 	}
2665 
2666 }
2667 
2668 static void
gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device * adev,bool enable,int xcc_id)2669 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
2670 						bool enable, int xcc_id)
2671 {
2672 	uint32_t def, data;
2673 
2674 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
2675 
2676 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2677 		/* unset CGCG override */
2678 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
2679 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2680 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2681 		else
2682 			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2683 		/* update CGCG and CGLS override bits */
2684 		if (def != data)
2685 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2686 
2687 		/* CGCG Hysteresis: 400us */
2688 		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2689 
2690 		data = (0x2710
2691 			<< RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
2692 		       RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
2693 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2694 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
2695 				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
2696 		if (def != data)
2697 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2698 
2699 		/* set IDLE_POLL_COUNT(0x33450100)*/
2700 		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
2701 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
2702 			(0x3345 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2703 		if (def != data)
2704 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
2705 	} else {
2706 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2707 		/* reset CGCG/CGLS bits */
2708 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
2709 		/* disable cgcg and cgls in FSM */
2710 		if (def != data)
2711 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2712 	}
2713 
2714 }
2715 
gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device * adev,bool enable,int xcc_id)2716 static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
2717 						  bool enable, int xcc_id)
2718 {
2719 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
2720 
2721 	if (enable) {
2722 		/* FGCG */
2723 		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2724 		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2725 
2726 		/* CGCG/CGLS should be enabled after MGCG/MGLS
2727 		 * ===  MGCG + MGLS ===
2728 		 */
2729 		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2730 								xcc_id);
2731 		/* ===  CGCG + CGLS === */
2732 		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2733 								xcc_id);
2734 	} else {
2735 		/* CGCG/CGLS should be disabled before MGCG/MGLS
2736 		 * ===  CGCG + CGLS ===
2737 		 */
2738 		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2739 								xcc_id);
2740 		/* ===  MGCG + MGLS === */
2741 		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2742 								xcc_id);
2743 
2744 		/* FGCG */
2745 		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2746 		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2747 	}
2748 
2749 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
2750 
2751 	return 0;
2752 }
2753 
2754 static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
2755 	.is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled,
2756 	.set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode,
2757 	.unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode,
2758 	.init = gfx_v9_4_3_rlc_init,
2759 	.resume = gfx_v9_4_3_rlc_resume,
2760 	.stop = gfx_v9_4_3_rlc_stop,
2761 	.reset = gfx_v9_4_3_rlc_reset,
2762 	.start = gfx_v9_4_3_rlc_start,
2763 	.update_spm_vmid = gfx_v9_4_3_update_spm_vmid,
2764 	.is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
2765 };
2766 
gfx_v9_4_3_set_powergating_state(void * handle,enum amd_powergating_state state)2767 static int gfx_v9_4_3_set_powergating_state(void *handle,
2768 					  enum amd_powergating_state state)
2769 {
2770 	return 0;
2771 }
2772 
gfx_v9_4_3_set_clockgating_state(void * handle,enum amd_clockgating_state state)2773 static int gfx_v9_4_3_set_clockgating_state(void *handle,
2774 					  enum amd_clockgating_state state)
2775 {
2776 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2777 	int i, num_xcc;
2778 
2779 	if (amdgpu_sriov_vf(adev))
2780 		return 0;
2781 
2782 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2783 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2784 	case IP_VERSION(9, 4, 3):
2785 	case IP_VERSION(9, 4, 4):
2786 		for (i = 0; i < num_xcc; i++)
2787 			gfx_v9_4_3_xcc_update_gfx_clock_gating(
2788 				adev, state == AMD_CG_STATE_GATE, i);
2789 		break;
2790 	default:
2791 		break;
2792 	}
2793 	return 0;
2794 }
2795 
gfx_v9_4_3_get_clockgating_state(void * handle,u64 * flags)2796 static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags)
2797 {
2798 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2799 	int data;
2800 
2801 	if (amdgpu_sriov_vf(adev))
2802 		*flags = 0;
2803 
2804 	/* AMD_CG_SUPPORT_GFX_MGCG */
2805 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE));
2806 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
2807 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
2808 
2809 	/* AMD_CG_SUPPORT_GFX_CGCG */
2810 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL));
2811 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
2812 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
2813 
2814 	/* AMD_CG_SUPPORT_GFX_CGLS */
2815 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
2816 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
2817 
2818 	/* AMD_CG_SUPPORT_GFX_RLC_LS */
2819 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL));
2820 	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
2821 		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
2822 
2823 	/* AMD_CG_SUPPORT_GFX_CP_LS */
2824 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL));
2825 	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
2826 		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
2827 }
2828 
gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring * ring)2829 static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2830 {
2831 	struct amdgpu_device *adev = ring->adev;
2832 	u32 ref_and_mask, reg_mem_engine;
2833 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
2834 
2835 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
2836 		switch (ring->me) {
2837 		case 1:
2838 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
2839 			break;
2840 		case 2:
2841 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
2842 			break;
2843 		default:
2844 			return;
2845 		}
2846 		reg_mem_engine = 0;
2847 	} else {
2848 		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
2849 		reg_mem_engine = 1; /* pfp */
2850 	}
2851 
2852 	gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
2853 			      adev->nbio.funcs->get_hdp_flush_req_offset(adev),
2854 			      adev->nbio.funcs->get_hdp_flush_done_offset(adev),
2855 			      ref_and_mask, ref_and_mask, 0x20);
2856 }
2857 
gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring * ring,struct amdgpu_job * job,struct amdgpu_ib * ib,uint32_t flags)2858 static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring,
2859 					  struct amdgpu_job *job,
2860 					  struct amdgpu_ib *ib,
2861 					  uint32_t flags)
2862 {
2863 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2864 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
2865 
2866 	/* Currently, there is a high possibility to get wave ID mismatch
2867 	 * between ME and GDS, leading to a hw deadlock, because ME generates
2868 	 * different wave IDs than the GDS expects. This situation happens
2869 	 * randomly when at least 5 compute pipes use GDS ordered append.
2870 	 * The wave IDs generated by ME are also wrong after suspend/resume.
2871 	 * Those are probably bugs somewhere else in the kernel driver.
2872 	 *
2873 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
2874 	 * GDS to 0 for this ring (me/pipe).
2875 	 */
2876 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
2877 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2878 		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
2879 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
2880 	}
2881 
2882 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2883 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
2884 	amdgpu_ring_write(ring,
2885 #ifdef __BIG_ENDIAN
2886 				(2 << 0) |
2887 #endif
2888 				lower_32_bits(ib->gpu_addr));
2889 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
2890 	amdgpu_ring_write(ring, control);
2891 }
2892 
gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring * ring,u64 addr,u64 seq,unsigned flags)2893 static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
2894 				     u64 seq, unsigned flags)
2895 {
2896 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2897 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2898 	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
2899 
2900 	/* RELEASE_MEM - flush caches, send int */
2901 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
2902 	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
2903 					       EOP_TC_NC_ACTION_EN) :
2904 					      (EOP_TCL1_ACTION_EN |
2905 					       EOP_TC_ACTION_EN |
2906 					       EOP_TC_WB_ACTION_EN |
2907 					       EOP_TC_MD_ACTION_EN)) |
2908 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2909 				 EVENT_INDEX(5)));
2910 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2911 
2912 	/*
2913 	 * the address should be Qword aligned if 64bit write, Dword
2914 	 * aligned if only send 32bit data low (discard data high)
2915 	 */
2916 	if (write64bit)
2917 		BUG_ON(addr & 0x7);
2918 	else
2919 		BUG_ON(addr & 0x3);
2920 	amdgpu_ring_write(ring, lower_32_bits(addr));
2921 	amdgpu_ring_write(ring, upper_32_bits(addr));
2922 	amdgpu_ring_write(ring, lower_32_bits(seq));
2923 	amdgpu_ring_write(ring, upper_32_bits(seq));
2924 	amdgpu_ring_write(ring, 0);
2925 }
2926 
gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring * ring)2927 static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
2928 {
2929 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
2930 	uint32_t seq = ring->fence_drv.sync_seq;
2931 	uint64_t addr = ring->fence_drv.gpu_addr;
2932 
2933 	gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0,
2934 			      lower_32_bits(addr), upper_32_bits(addr),
2935 			      seq, 0xffffffff, 4);
2936 }
2937 
gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring * ring,unsigned vmid,uint64_t pd_addr)2938 static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring,
2939 					unsigned vmid, uint64_t pd_addr)
2940 {
2941 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
2942 }
2943 
gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring * ring)2944 static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring)
2945 {
2946 	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
2947 }
2948 
gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring * ring)2949 static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring)
2950 {
2951 	u64 wptr;
2952 
2953 	/* XXX check if swapping is necessary on BE */
2954 	if (ring->use_doorbell)
2955 		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
2956 	else
2957 		BUG();
2958 	return wptr;
2959 }
2960 
gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring * ring)2961 static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring)
2962 {
2963 	struct amdgpu_device *adev = ring->adev;
2964 
2965 	/* XXX check if swapping is necessary on BE */
2966 	if (ring->use_doorbell) {
2967 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
2968 		WDOORBELL64(ring->doorbell_index, ring->wptr);
2969 	} else {
2970 		BUG(); /* only DOORBELL method supported on gfx9 now */
2971 	}
2972 }
2973 
gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring * ring,u64 addr,u64 seq,unsigned int flags)2974 static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
2975 					 u64 seq, unsigned int flags)
2976 {
2977 	struct amdgpu_device *adev = ring->adev;
2978 
2979 	/* we only allocate 32bit for each seq wb address */
2980 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
2981 
2982 	/* write fence seq to the "addr" */
2983 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2984 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2985 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
2986 	amdgpu_ring_write(ring, lower_32_bits(addr));
2987 	amdgpu_ring_write(ring, upper_32_bits(addr));
2988 	amdgpu_ring_write(ring, lower_32_bits(seq));
2989 
2990 	if (flags & AMDGPU_FENCE_FLAG_INT) {
2991 		/* set register to trigger INT */
2992 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2993 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2994 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
2995 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
2996 		amdgpu_ring_write(ring, 0);
2997 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
2998 	}
2999 }
3000 
gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring * ring,uint32_t reg,uint32_t reg_val_offs)3001 static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
3002 				    uint32_t reg_val_offs)
3003 {
3004 	struct amdgpu_device *adev = ring->adev;
3005 
3006 	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
3007 
3008 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
3009 	amdgpu_ring_write(ring, 0 |	/* src: register*/
3010 				(5 << 8) |	/* dst: memory */
3011 				(1 << 20));	/* write confirm */
3012 	amdgpu_ring_write(ring, reg);
3013 	amdgpu_ring_write(ring, 0);
3014 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
3015 				reg_val_offs * 4));
3016 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
3017 				reg_val_offs * 4));
3018 }
3019 
gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring * ring,uint32_t reg,uint32_t val)3020 static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
3021 				    uint32_t val)
3022 {
3023 	uint32_t cmd = 0;
3024 
3025 	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
3026 
3027 	switch (ring->funcs->type) {
3028 	case AMDGPU_RING_TYPE_GFX:
3029 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
3030 		break;
3031 	case AMDGPU_RING_TYPE_KIQ:
3032 		cmd = (1 << 16); /* no inc addr */
3033 		break;
3034 	default:
3035 		cmd = WR_CONFIRM;
3036 		break;
3037 	}
3038 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3039 	amdgpu_ring_write(ring, cmd);
3040 	amdgpu_ring_write(ring, reg);
3041 	amdgpu_ring_write(ring, 0);
3042 	amdgpu_ring_write(ring, val);
3043 }
3044 
gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring * ring,uint32_t reg,uint32_t val,uint32_t mask)3045 static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
3046 					uint32_t val, uint32_t mask)
3047 {
3048 	gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
3049 }
3050 
gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring * ring,uint32_t reg0,uint32_t reg1,uint32_t ref,uint32_t mask)3051 static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
3052 						  uint32_t reg0, uint32_t reg1,
3053 						  uint32_t ref, uint32_t mask)
3054 {
3055 	amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
3056 						   ref, mask);
3057 }
3058 
gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring * ring,unsigned vmid)3059 static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring,
3060 					  unsigned vmid)
3061 {
3062 	struct amdgpu_device *adev = ring->adev;
3063 	uint32_t value = 0;
3064 
3065 	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
3066 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
3067 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
3068 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
3069 	amdgpu_gfx_rlc_enter_safe_mode(adev, ring->xcc_id);
3070 	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regSQ_CMD, value);
3071 	amdgpu_gfx_rlc_exit_safe_mode(adev, ring->xcc_id);
3072 }
3073 
gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(struct amdgpu_device * adev,int me,int pipe,enum amdgpu_interrupt_state state,int xcc_id)3074 static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3075 	struct amdgpu_device *adev, int me, int pipe,
3076 	enum amdgpu_interrupt_state state, int xcc_id)
3077 {
3078 	u32 mec_int_cntl, mec_int_cntl_reg;
3079 
3080 	/*
3081 	 * amdgpu controls only the first MEC. That's why this function only
3082 	 * handles the setting of interrupts for this specific MEC. All other
3083 	 * pipes' interrupts are set by amdkfd.
3084 	 */
3085 
3086 	if (me == 1) {
3087 		switch (pipe) {
3088 		case 0:
3089 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
3090 			break;
3091 		case 1:
3092 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
3093 			break;
3094 		case 2:
3095 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
3096 			break;
3097 		case 3:
3098 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
3099 			break;
3100 		default:
3101 			DRM_DEBUG("invalid pipe %d\n", pipe);
3102 			return;
3103 		}
3104 	} else {
3105 		DRM_DEBUG("invalid me %d\n", me);
3106 		return;
3107 	}
3108 
3109 	switch (state) {
3110 	case AMDGPU_IRQ_STATE_DISABLE:
3111 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3112 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3113 					     TIME_STAMP_INT_ENABLE, 0);
3114 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3115 		break;
3116 	case AMDGPU_IRQ_STATE_ENABLE:
3117 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3118 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3119 					     TIME_STAMP_INT_ENABLE, 1);
3120 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3121 		break;
3122 	default:
3123 		break;
3124 	}
3125 }
3126 
gfx_v9_4_3_get_cpc_int_cntl(struct amdgpu_device * adev,int xcc_id,int me,int pipe)3127 static u32 gfx_v9_4_3_get_cpc_int_cntl(struct amdgpu_device *adev,
3128 				     int xcc_id, int me, int pipe)
3129 {
3130 	/*
3131 	 * amdgpu controls only the first MEC. That's why this function only
3132 	 * handles the setting of interrupts for this specific MEC. All other
3133 	 * pipes' interrupts are set by amdkfd.
3134 	 */
3135 	if (me != 1)
3136 		return 0;
3137 
3138 	switch (pipe) {
3139 	case 0:
3140 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
3141 	case 1:
3142 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
3143 	case 2:
3144 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
3145 	case 3:
3146 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
3147 	default:
3148 		return 0;
3149 	}
3150 }
3151 
gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device * adev,struct amdgpu_irq_src * source,unsigned type,enum amdgpu_interrupt_state state)3152 static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
3153 					     struct amdgpu_irq_src *source,
3154 					     unsigned type,
3155 					     enum amdgpu_interrupt_state state)
3156 {
3157 	u32 mec_int_cntl_reg, mec_int_cntl;
3158 	int i, j, k, num_xcc;
3159 
3160 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3161 	switch (state) {
3162 	case AMDGPU_IRQ_STATE_DISABLE:
3163 	case AMDGPU_IRQ_STATE_ENABLE:
3164 		for (i = 0; i < num_xcc; i++) {
3165 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3166 					      PRIV_REG_INT_ENABLE,
3167 					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3168 			for (j = 0; j < adev->gfx.mec.num_mec; j++) {
3169 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
3170 					/* MECs start at 1 */
3171 					mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
3172 
3173 					if (mec_int_cntl_reg) {
3174 						mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
3175 						mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3176 									     PRIV_REG_INT_ENABLE,
3177 									     state == AMDGPU_IRQ_STATE_ENABLE ?
3178 									     1 : 0);
3179 						WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
3180 					}
3181 				}
3182 			}
3183 		}
3184 		break;
3185 	default:
3186 		break;
3187 	}
3188 
3189 	return 0;
3190 }
3191 
gfx_v9_4_3_set_bad_op_fault_state(struct amdgpu_device * adev,struct amdgpu_irq_src * source,unsigned type,enum amdgpu_interrupt_state state)3192 static int gfx_v9_4_3_set_bad_op_fault_state(struct amdgpu_device *adev,
3193 					     struct amdgpu_irq_src *source,
3194 					     unsigned type,
3195 					     enum amdgpu_interrupt_state state)
3196 {
3197 	u32 mec_int_cntl_reg, mec_int_cntl;
3198 	int i, j, k, num_xcc;
3199 
3200 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3201 	switch (state) {
3202 	case AMDGPU_IRQ_STATE_DISABLE:
3203 	case AMDGPU_IRQ_STATE_ENABLE:
3204 		for (i = 0; i < num_xcc; i++) {
3205 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3206 					      OPCODE_ERROR_INT_ENABLE,
3207 					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3208 			for (j = 0; j < adev->gfx.mec.num_mec; j++) {
3209 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
3210 					/* MECs start at 1 */
3211 					mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
3212 
3213 					if (mec_int_cntl_reg) {
3214 						mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
3215 						mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3216 									     OPCODE_ERROR_INT_ENABLE,
3217 									     state == AMDGPU_IRQ_STATE_ENABLE ?
3218 									     1 : 0);
3219 						WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
3220 					}
3221 				}
3222 			}
3223 		}
3224 		break;
3225 	default:
3226 		break;
3227 	}
3228 
3229 	return 0;
3230 }
3231 
gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device * adev,struct amdgpu_irq_src * source,unsigned type,enum amdgpu_interrupt_state state)3232 static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev,
3233 					      struct amdgpu_irq_src *source,
3234 					      unsigned type,
3235 					      enum amdgpu_interrupt_state state)
3236 {
3237 	int i, num_xcc;
3238 
3239 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3240 	switch (state) {
3241 	case AMDGPU_IRQ_STATE_DISABLE:
3242 	case AMDGPU_IRQ_STATE_ENABLE:
3243 		for (i = 0; i < num_xcc; i++)
3244 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3245 				PRIV_INSTR_INT_ENABLE,
3246 				state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3247 		break;
3248 	default:
3249 		break;
3250 	}
3251 
3252 	return 0;
3253 }
3254 
gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device * adev,struct amdgpu_irq_src * src,unsigned type,enum amdgpu_interrupt_state state)3255 static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev,
3256 					    struct amdgpu_irq_src *src,
3257 					    unsigned type,
3258 					    enum amdgpu_interrupt_state state)
3259 {
3260 	int i, num_xcc;
3261 
3262 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3263 	for (i = 0; i < num_xcc; i++) {
3264 		switch (type) {
3265 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
3266 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3267 				adev, 1, 0, state, i);
3268 			break;
3269 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
3270 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3271 				adev, 1, 1, state, i);
3272 			break;
3273 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
3274 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3275 				adev, 1, 2, state, i);
3276 			break;
3277 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
3278 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3279 				adev, 1, 3, state, i);
3280 			break;
3281 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
3282 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3283 				adev, 2, 0, state, i);
3284 			break;
3285 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
3286 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3287 				adev, 2, 1, state, i);
3288 			break;
3289 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
3290 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3291 				adev, 2, 2, state, i);
3292 			break;
3293 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
3294 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3295 				adev, 2, 3, state, i);
3296 			break;
3297 		default:
3298 			break;
3299 		}
3300 	}
3301 
3302 	return 0;
3303 }
3304 
gfx_v9_4_3_eop_irq(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)3305 static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev,
3306 			    struct amdgpu_irq_src *source,
3307 			    struct amdgpu_iv_entry *entry)
3308 {
3309 	int i, xcc_id;
3310 	u8 me_id, pipe_id, queue_id;
3311 	struct amdgpu_ring *ring;
3312 
3313 	DRM_DEBUG("IH: CP EOP\n");
3314 	me_id = (entry->ring_id & 0x0c) >> 2;
3315 	pipe_id = (entry->ring_id & 0x03) >> 0;
3316 	queue_id = (entry->ring_id & 0x70) >> 4;
3317 
3318 	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
3319 
3320 	if (xcc_id == -EINVAL)
3321 		return -EINVAL;
3322 
3323 	switch (me_id) {
3324 	case 0:
3325 	case 1:
3326 	case 2:
3327 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3328 			ring = &adev->gfx.compute_ring
3329 					[i +
3330 					 xcc_id * adev->gfx.num_compute_rings];
3331 			/* Per-queue interrupt is supported for MEC starting from VI.
3332 			  * The interrupt can only be enabled/disabled per pipe instead of per queue.
3333 			  */
3334 
3335 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
3336 				amdgpu_fence_process(ring);
3337 		}
3338 		break;
3339 	}
3340 	return 0;
3341 }
3342 
gfx_v9_4_3_fault(struct amdgpu_device * adev,struct amdgpu_iv_entry * entry)3343 static void gfx_v9_4_3_fault(struct amdgpu_device *adev,
3344 			   struct amdgpu_iv_entry *entry)
3345 {
3346 	u8 me_id, pipe_id, queue_id;
3347 	struct amdgpu_ring *ring;
3348 	int i, xcc_id;
3349 
3350 	me_id = (entry->ring_id & 0x0c) >> 2;
3351 	pipe_id = (entry->ring_id & 0x03) >> 0;
3352 	queue_id = (entry->ring_id & 0x70) >> 4;
3353 
3354 	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
3355 
3356 	if (xcc_id == -EINVAL)
3357 		return;
3358 
3359 	switch (me_id) {
3360 	case 0:
3361 	case 1:
3362 	case 2:
3363 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3364 			ring = &adev->gfx.compute_ring
3365 					[i +
3366 					 xcc_id * adev->gfx.num_compute_rings];
3367 			if (ring->me == me_id && ring->pipe == pipe_id &&
3368 			    ring->queue == queue_id)
3369 				drm_sched_fault(&ring->sched);
3370 		}
3371 		break;
3372 	}
3373 }
3374 
gfx_v9_4_3_priv_reg_irq(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)3375 static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
3376 				 struct amdgpu_irq_src *source,
3377 				 struct amdgpu_iv_entry *entry)
3378 {
3379 	DRM_ERROR("Illegal register access in command stream\n");
3380 	gfx_v9_4_3_fault(adev, entry);
3381 	return 0;
3382 }
3383 
gfx_v9_4_3_bad_op_irq(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)3384 static int gfx_v9_4_3_bad_op_irq(struct amdgpu_device *adev,
3385 				 struct amdgpu_irq_src *source,
3386 				 struct amdgpu_iv_entry *entry)
3387 {
3388 	DRM_ERROR("Illegal opcode in command stream\n");
3389 	gfx_v9_4_3_fault(adev, entry);
3390 	return 0;
3391 }
3392 
gfx_v9_4_3_priv_inst_irq(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)3393 static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
3394 				  struct amdgpu_irq_src *source,
3395 				  struct amdgpu_iv_entry *entry)
3396 {
3397 	DRM_ERROR("Illegal instruction in command stream\n");
3398 	gfx_v9_4_3_fault(adev, entry);
3399 	return 0;
3400 }
3401 
gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring * ring)3402 static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring)
3403 {
3404 	const unsigned int cp_coher_cntl =
3405 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
3406 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
3407 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
3408 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
3409 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
3410 
3411 	/* ACQUIRE_MEM -make one or more surfaces valid for use by the subsequent operations */
3412 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
3413 	amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
3414 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
3415 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
3416 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
3417 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
3418 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
3419 }
3420 
gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring * ring,uint32_t pipe,bool enable)3421 static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring,
3422 					uint32_t pipe, bool enable)
3423 {
3424 	struct amdgpu_device *adev = ring->adev;
3425 	uint32_t val;
3426 	uint32_t wcl_cs_reg;
3427 
3428 	/* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are same */
3429 	val = enable ? 0x1 : 0x7f;
3430 
3431 	switch (pipe) {
3432 	case 0:
3433 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0);
3434 		break;
3435 	case 1:
3436 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1);
3437 		break;
3438 	case 2:
3439 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2);
3440 		break;
3441 	case 3:
3442 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3);
3443 		break;
3444 	default:
3445 		DRM_DEBUG("invalid pipe %d\n", pipe);
3446 		return;
3447 	}
3448 
3449 	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
3450 
3451 }
gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring * ring,bool enable)3452 static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
3453 {
3454 	struct amdgpu_device *adev = ring->adev;
3455 	uint32_t val;
3456 	int i;
3457 
3458 	/* regSPI_WCL_PIPE_PERCENT_GFX is 7 bit multiplier register to limit
3459 	 * number of gfx waves. Setting 5 bit will make sure gfx only gets
3460 	 * around 25% of gpu resources.
3461 	 */
3462 	val = enable ? 0x1f : 0x07ffffff;
3463 	amdgpu_ring_emit_wreg(ring,
3464 			      SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX),
3465 			      val);
3466 
3467 	/* Restrict waves for normal/low priority compute queues as well
3468 	 * to get best QoS for high priority compute jobs.
3469 	 *
3470 	 * amdgpu controls only 1st ME(0-3 CS pipes).
3471 	 */
3472 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3473 		if (i != ring->pipe)
3474 			gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable);
3475 
3476 	}
3477 }
3478 
gfx_v9_4_3_unmap_done(struct amdgpu_device * adev,uint32_t me,uint32_t pipe,uint32_t queue,uint32_t xcc_id)3479 static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
3480 				uint32_t pipe, uint32_t queue,
3481 				uint32_t xcc_id)
3482 {
3483 	int i, r;
3484 	/* make sure dequeue is complete*/
3485 	gfx_v9_4_3_xcc_set_safe_mode(adev, xcc_id);
3486 	mutex_lock(&adev->srbm_mutex);
3487 	soc15_grbm_select(adev, me, pipe, queue, 0, GET_INST(GC, xcc_id));
3488 	for (i = 0; i < adev->usec_timeout; i++) {
3489 		if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
3490 			break;
3491 		udelay(1);
3492 	}
3493 	if (i >= adev->usec_timeout)
3494 		r = -ETIMEDOUT;
3495 	else
3496 		r = 0;
3497 	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
3498 	mutex_unlock(&adev->srbm_mutex);
3499 	gfx_v9_4_3_xcc_unset_safe_mode(adev, xcc_id);
3500 
3501 	return r;
3502 
3503 }
3504 
gfx_v9_4_3_pipe_reset_support(struct amdgpu_device * adev)3505 static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
3506 {
3507 	/*TODO: Need check gfx9.4.4 mec fw whether supports pipe reset as well.*/
3508 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
3509 			adev->gfx.mec_fw_version >= 0x0000009b)
3510 		return true;
3511 	else
3512 		dev_warn_once(adev->dev, "Please use the latest MEC version to see whether support pipe reset\n");
3513 
3514 	return false;
3515 }
3516 
gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring * ring)3517 static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring)
3518 {
3519 	struct amdgpu_device *adev = ring->adev;
3520 	uint32_t reset_pipe, clean_pipe;
3521 	int r;
3522 
3523 	if (!gfx_v9_4_3_pipe_reset_support(adev))
3524 		return -EINVAL;
3525 
3526 	gfx_v9_4_3_xcc_set_safe_mode(adev, ring->xcc_id);
3527 	mutex_lock(&adev->srbm_mutex);
3528 
3529 	reset_pipe = RREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL);
3530 	clean_pipe = reset_pipe;
3531 
3532 	if (ring->me == 1) {
3533 		switch (ring->pipe) {
3534 		case 0:
3535 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3536 						   MEC_ME1_PIPE0_RESET, 1);
3537 			break;
3538 		case 1:
3539 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3540 						   MEC_ME1_PIPE1_RESET, 1);
3541 			break;
3542 		case 2:
3543 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3544 						   MEC_ME1_PIPE2_RESET, 1);
3545 			break;
3546 		case 3:
3547 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3548 						   MEC_ME1_PIPE3_RESET, 1);
3549 			break;
3550 		default:
3551 			break;
3552 		}
3553 	} else {
3554 		if (ring->pipe)
3555 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3556 						   MEC_ME2_PIPE1_RESET, 1);
3557 		else
3558 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3559 						   MEC_ME2_PIPE0_RESET, 1);
3560 	}
3561 
3562 	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, reset_pipe);
3563 	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, clean_pipe);
3564 	mutex_unlock(&adev->srbm_mutex);
3565 	gfx_v9_4_3_xcc_unset_safe_mode(adev, ring->xcc_id);
3566 
3567 	r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
3568 	return r;
3569 }
3570 
gfx_v9_4_3_reset_kcq(struct amdgpu_ring * ring,unsigned int vmid)3571 static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
3572 				unsigned int vmid)
3573 {
3574 	struct amdgpu_device *adev = ring->adev;
3575 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
3576 	struct amdgpu_ring *kiq_ring = &kiq->ring;
3577 	unsigned long flags;
3578 	int r;
3579 
3580 	if (amdgpu_sriov_vf(adev))
3581 		return -EINVAL;
3582 
3583 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
3584 		return -EINVAL;
3585 
3586 	spin_lock_irqsave(&kiq->ring_lock, flags);
3587 
3588 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
3589 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
3590 		return -ENOMEM;
3591 	}
3592 
3593 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
3594 				   0, 0);
3595 	amdgpu_ring_commit(kiq_ring);
3596 
3597 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
3598 
3599 	r = amdgpu_ring_test_ring(kiq_ring);
3600 	if (r) {
3601 		dev_err(adev->dev, "kiq ring test failed after ring: %s queue reset\n",
3602 				ring->name);
3603 		goto pipe_reset;
3604 	}
3605 
3606 	r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
3607 	if (r)
3608 		dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n");
3609 
3610 pipe_reset:
3611 	if(r) {
3612 		r = gfx_v9_4_3_reset_hw_pipe(ring);
3613 		dev_info(adev->dev, "ring: %s pipe reset :%s\n", ring->name,
3614 				r ? "failed" : "successfully");
3615 		if (r)
3616 			return r;
3617 	}
3618 
3619 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
3620 	if (unlikely(r != 0)){
3621 		dev_err(adev->dev, "fail to resv mqd_obj\n");
3622 		return r;
3623 	}
3624 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3625 	if (!r) {
3626 		r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
3627 		amdgpu_bo_kunmap(ring->mqd_obj);
3628 		ring->mqd_ptr = NULL;
3629 	}
3630 	amdgpu_bo_unreserve(ring->mqd_obj);
3631 	if (r) {
3632 		dev_err(adev->dev, "fail to unresv mqd_obj\n");
3633 		return r;
3634 	}
3635 	spin_lock_irqsave(&kiq->ring_lock, flags);
3636 	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
3637 	if (r) {
3638 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
3639 		return -ENOMEM;
3640 	}
3641 	kiq->pmf->kiq_map_queues(kiq_ring, ring);
3642 	amdgpu_ring_commit(kiq_ring);
3643 	r = amdgpu_ring_test_ring(kiq_ring);
3644 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
3645 	if (r) {
3646 		dev_err(adev->dev, "fail to remap queue\n");
3647 		return r;
3648 	}
3649 	return amdgpu_ring_test_ring(ring);
3650 }
3651 
3652 enum amdgpu_gfx_cp_ras_mem_id {
3653 	AMDGPU_GFX_CP_MEM1 = 1,
3654 	AMDGPU_GFX_CP_MEM2,
3655 	AMDGPU_GFX_CP_MEM3,
3656 	AMDGPU_GFX_CP_MEM4,
3657 	AMDGPU_GFX_CP_MEM5,
3658 };
3659 
3660 enum amdgpu_gfx_gcea_ras_mem_id {
3661 	AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4,
3662 	AMDGPU_GFX_GCEA_IORD_CMDMEM,
3663 	AMDGPU_GFX_GCEA_GMIWR_CMDMEM,
3664 	AMDGPU_GFX_GCEA_GMIRD_CMDMEM,
3665 	AMDGPU_GFX_GCEA_DRAMWR_CMDMEM,
3666 	AMDGPU_GFX_GCEA_DRAMRD_CMDMEM,
3667 	AMDGPU_GFX_GCEA_MAM_DMEM0,
3668 	AMDGPU_GFX_GCEA_MAM_DMEM1,
3669 	AMDGPU_GFX_GCEA_MAM_DMEM2,
3670 	AMDGPU_GFX_GCEA_MAM_DMEM3,
3671 	AMDGPU_GFX_GCEA_MAM_AMEM0,
3672 	AMDGPU_GFX_GCEA_MAM_AMEM1,
3673 	AMDGPU_GFX_GCEA_MAM_AMEM2,
3674 	AMDGPU_GFX_GCEA_MAM_AMEM3,
3675 	AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER,
3676 	AMDGPU_GFX_GCEA_WRET_TAGMEM,
3677 	AMDGPU_GFX_GCEA_RRET_TAGMEM,
3678 	AMDGPU_GFX_GCEA_IOWR_DATAMEM,
3679 	AMDGPU_GFX_GCEA_GMIWR_DATAMEM,
3680 	AMDGPU_GFX_GCEA_DRAM_DATAMEM,
3681 };
3682 
3683 enum amdgpu_gfx_gc_cane_ras_mem_id {
3684 	AMDGPU_GFX_GC_CANE_MEM0 = 0,
3685 };
3686 
3687 enum amdgpu_gfx_gcutcl2_ras_mem_id {
3688 	AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160,
3689 };
3690 
3691 enum amdgpu_gfx_gds_ras_mem_id {
3692 	AMDGPU_GFX_GDS_MEM0 = 0,
3693 };
3694 
3695 enum amdgpu_gfx_lds_ras_mem_id {
3696 	AMDGPU_GFX_LDS_BANK0 = 0,
3697 	AMDGPU_GFX_LDS_BANK1,
3698 	AMDGPU_GFX_LDS_BANK2,
3699 	AMDGPU_GFX_LDS_BANK3,
3700 	AMDGPU_GFX_LDS_BANK4,
3701 	AMDGPU_GFX_LDS_BANK5,
3702 	AMDGPU_GFX_LDS_BANK6,
3703 	AMDGPU_GFX_LDS_BANK7,
3704 	AMDGPU_GFX_LDS_BANK8,
3705 	AMDGPU_GFX_LDS_BANK9,
3706 	AMDGPU_GFX_LDS_BANK10,
3707 	AMDGPU_GFX_LDS_BANK11,
3708 	AMDGPU_GFX_LDS_BANK12,
3709 	AMDGPU_GFX_LDS_BANK13,
3710 	AMDGPU_GFX_LDS_BANK14,
3711 	AMDGPU_GFX_LDS_BANK15,
3712 	AMDGPU_GFX_LDS_BANK16,
3713 	AMDGPU_GFX_LDS_BANK17,
3714 	AMDGPU_GFX_LDS_BANK18,
3715 	AMDGPU_GFX_LDS_BANK19,
3716 	AMDGPU_GFX_LDS_BANK20,
3717 	AMDGPU_GFX_LDS_BANK21,
3718 	AMDGPU_GFX_LDS_BANK22,
3719 	AMDGPU_GFX_LDS_BANK23,
3720 	AMDGPU_GFX_LDS_BANK24,
3721 	AMDGPU_GFX_LDS_BANK25,
3722 	AMDGPU_GFX_LDS_BANK26,
3723 	AMDGPU_GFX_LDS_BANK27,
3724 	AMDGPU_GFX_LDS_BANK28,
3725 	AMDGPU_GFX_LDS_BANK29,
3726 	AMDGPU_GFX_LDS_BANK30,
3727 	AMDGPU_GFX_LDS_BANK31,
3728 	AMDGPU_GFX_LDS_SP_BUFFER_A,
3729 	AMDGPU_GFX_LDS_SP_BUFFER_B,
3730 };
3731 
3732 enum amdgpu_gfx_rlc_ras_mem_id {
3733 	AMDGPU_GFX_RLC_GPMF32 = 1,
3734 	AMDGPU_GFX_RLC_RLCVF32,
3735 	AMDGPU_GFX_RLC_SCRATCH,
3736 	AMDGPU_GFX_RLC_SRM_ARAM,
3737 	AMDGPU_GFX_RLC_SRM_DRAM,
3738 	AMDGPU_GFX_RLC_TCTAG,
3739 	AMDGPU_GFX_RLC_SPM_SE,
3740 	AMDGPU_GFX_RLC_SPM_GRBMT,
3741 };
3742 
3743 enum amdgpu_gfx_sp_ras_mem_id {
3744 	AMDGPU_GFX_SP_SIMDID0 = 0,
3745 };
3746 
3747 enum amdgpu_gfx_spi_ras_mem_id {
3748 	AMDGPU_GFX_SPI_MEM0 = 0,
3749 	AMDGPU_GFX_SPI_MEM1,
3750 	AMDGPU_GFX_SPI_MEM2,
3751 	AMDGPU_GFX_SPI_MEM3,
3752 };
3753 
3754 enum amdgpu_gfx_sqc_ras_mem_id {
3755 	AMDGPU_GFX_SQC_INST_CACHE_A = 100,
3756 	AMDGPU_GFX_SQC_INST_CACHE_B = 101,
3757 	AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102,
3758 	AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103,
3759 	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104,
3760 	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105,
3761 	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106,
3762 	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107,
3763 	AMDGPU_GFX_SQC_DATA_CACHE_A = 200,
3764 	AMDGPU_GFX_SQC_DATA_CACHE_B = 201,
3765 	AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202,
3766 	AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203,
3767 	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204,
3768 	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205,
3769 	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206,
3770 	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207,
3771 	AMDGPU_GFX_SQC_DIRTY_BIT_A = 208,
3772 	AMDGPU_GFX_SQC_DIRTY_BIT_B = 209,
3773 	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210,
3774 	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211,
3775 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212,
3776 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213,
3777 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108,
3778 };
3779 
3780 enum amdgpu_gfx_sq_ras_mem_id {
3781 	AMDGPU_GFX_SQ_SGPR_MEM0 = 0,
3782 	AMDGPU_GFX_SQ_SGPR_MEM1,
3783 	AMDGPU_GFX_SQ_SGPR_MEM2,
3784 	AMDGPU_GFX_SQ_SGPR_MEM3,
3785 };
3786 
3787 enum amdgpu_gfx_ta_ras_mem_id {
3788 	AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1,
3789 	AMDGPU_GFX_TA_FS_AFIFO_RAM_HI,
3790 	AMDGPU_GFX_TA_FS_CFIFO_RAM,
3791 	AMDGPU_GFX_TA_FSX_LFIFO,
3792 	AMDGPU_GFX_TA_FS_DFIFO_RAM,
3793 };
3794 
3795 enum amdgpu_gfx_tcc_ras_mem_id {
3796 	AMDGPU_GFX_TCC_MEM1 = 1,
3797 };
3798 
3799 enum amdgpu_gfx_tca_ras_mem_id {
3800 	AMDGPU_GFX_TCA_MEM1 = 1,
3801 };
3802 
3803 enum amdgpu_gfx_tci_ras_mem_id {
3804 	AMDGPU_GFX_TCIW_MEM = 1,
3805 };
3806 
3807 enum amdgpu_gfx_tcp_ras_mem_id {
3808 	AMDGPU_GFX_TCP_LFIFO0 = 1,
3809 	AMDGPU_GFX_TCP_SET0BANK0_RAM,
3810 	AMDGPU_GFX_TCP_SET0BANK1_RAM,
3811 	AMDGPU_GFX_TCP_SET0BANK2_RAM,
3812 	AMDGPU_GFX_TCP_SET0BANK3_RAM,
3813 	AMDGPU_GFX_TCP_SET1BANK0_RAM,
3814 	AMDGPU_GFX_TCP_SET1BANK1_RAM,
3815 	AMDGPU_GFX_TCP_SET1BANK2_RAM,
3816 	AMDGPU_GFX_TCP_SET1BANK3_RAM,
3817 	AMDGPU_GFX_TCP_SET2BANK0_RAM,
3818 	AMDGPU_GFX_TCP_SET2BANK1_RAM,
3819 	AMDGPU_GFX_TCP_SET2BANK2_RAM,
3820 	AMDGPU_GFX_TCP_SET2BANK3_RAM,
3821 	AMDGPU_GFX_TCP_SET3BANK0_RAM,
3822 	AMDGPU_GFX_TCP_SET3BANK1_RAM,
3823 	AMDGPU_GFX_TCP_SET3BANK2_RAM,
3824 	AMDGPU_GFX_TCP_SET3BANK3_RAM,
3825 	AMDGPU_GFX_TCP_VM_FIFO,
3826 	AMDGPU_GFX_TCP_DB_TAGRAM0,
3827 	AMDGPU_GFX_TCP_DB_TAGRAM1,
3828 	AMDGPU_GFX_TCP_DB_TAGRAM2,
3829 	AMDGPU_GFX_TCP_DB_TAGRAM3,
3830 	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0,
3831 	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1,
3832 	AMDGPU_GFX_TCP_CMD_FIFO,
3833 };
3834 
3835 enum amdgpu_gfx_td_ras_mem_id {
3836 	AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1,
3837 	AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM,
3838 	AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM,
3839 };
3840 
3841 enum amdgpu_gfx_tcx_ras_mem_id {
3842 	AMDGPU_GFX_TCX_FIFOD0 = 0,
3843 	AMDGPU_GFX_TCX_FIFOD1,
3844 	AMDGPU_GFX_TCX_FIFOD2,
3845 	AMDGPU_GFX_TCX_FIFOD3,
3846 	AMDGPU_GFX_TCX_FIFOD4,
3847 	AMDGPU_GFX_TCX_FIFOD5,
3848 	AMDGPU_GFX_TCX_FIFOD6,
3849 	AMDGPU_GFX_TCX_FIFOD7,
3850 	AMDGPU_GFX_TCX_FIFOB0,
3851 	AMDGPU_GFX_TCX_FIFOB1,
3852 	AMDGPU_GFX_TCX_FIFOB2,
3853 	AMDGPU_GFX_TCX_FIFOB3,
3854 	AMDGPU_GFX_TCX_FIFOB4,
3855 	AMDGPU_GFX_TCX_FIFOB5,
3856 	AMDGPU_GFX_TCX_FIFOB6,
3857 	AMDGPU_GFX_TCX_FIFOB7,
3858 	AMDGPU_GFX_TCX_FIFOA0,
3859 	AMDGPU_GFX_TCX_FIFOA1,
3860 	AMDGPU_GFX_TCX_FIFOA2,
3861 	AMDGPU_GFX_TCX_FIFOA3,
3862 	AMDGPU_GFX_TCX_FIFOA4,
3863 	AMDGPU_GFX_TCX_FIFOA5,
3864 	AMDGPU_GFX_TCX_FIFOA6,
3865 	AMDGPU_GFX_TCX_FIFOA7,
3866 	AMDGPU_GFX_TCX_CFIFO0,
3867 	AMDGPU_GFX_TCX_CFIFO1,
3868 	AMDGPU_GFX_TCX_CFIFO2,
3869 	AMDGPU_GFX_TCX_CFIFO3,
3870 	AMDGPU_GFX_TCX_CFIFO4,
3871 	AMDGPU_GFX_TCX_CFIFO5,
3872 	AMDGPU_GFX_TCX_CFIFO6,
3873 	AMDGPU_GFX_TCX_CFIFO7,
3874 	AMDGPU_GFX_TCX_FIFO_ACKB0,
3875 	AMDGPU_GFX_TCX_FIFO_ACKB1,
3876 	AMDGPU_GFX_TCX_FIFO_ACKB2,
3877 	AMDGPU_GFX_TCX_FIFO_ACKB3,
3878 	AMDGPU_GFX_TCX_FIFO_ACKB4,
3879 	AMDGPU_GFX_TCX_FIFO_ACKB5,
3880 	AMDGPU_GFX_TCX_FIFO_ACKB6,
3881 	AMDGPU_GFX_TCX_FIFO_ACKB7,
3882 	AMDGPU_GFX_TCX_FIFO_ACKD0,
3883 	AMDGPU_GFX_TCX_FIFO_ACKD1,
3884 	AMDGPU_GFX_TCX_FIFO_ACKD2,
3885 	AMDGPU_GFX_TCX_FIFO_ACKD3,
3886 	AMDGPU_GFX_TCX_FIFO_ACKD4,
3887 	AMDGPU_GFX_TCX_FIFO_ACKD5,
3888 	AMDGPU_GFX_TCX_FIFO_ACKD6,
3889 	AMDGPU_GFX_TCX_FIFO_ACKD7,
3890 	AMDGPU_GFX_TCX_DST_FIFOA0,
3891 	AMDGPU_GFX_TCX_DST_FIFOA1,
3892 	AMDGPU_GFX_TCX_DST_FIFOA2,
3893 	AMDGPU_GFX_TCX_DST_FIFOA3,
3894 	AMDGPU_GFX_TCX_DST_FIFOA4,
3895 	AMDGPU_GFX_TCX_DST_FIFOA5,
3896 	AMDGPU_GFX_TCX_DST_FIFOA6,
3897 	AMDGPU_GFX_TCX_DST_FIFOA7,
3898 	AMDGPU_GFX_TCX_DST_FIFOB0,
3899 	AMDGPU_GFX_TCX_DST_FIFOB1,
3900 	AMDGPU_GFX_TCX_DST_FIFOB2,
3901 	AMDGPU_GFX_TCX_DST_FIFOB3,
3902 	AMDGPU_GFX_TCX_DST_FIFOB4,
3903 	AMDGPU_GFX_TCX_DST_FIFOB5,
3904 	AMDGPU_GFX_TCX_DST_FIFOB6,
3905 	AMDGPU_GFX_TCX_DST_FIFOB7,
3906 	AMDGPU_GFX_TCX_DST_FIFOD0,
3907 	AMDGPU_GFX_TCX_DST_FIFOD1,
3908 	AMDGPU_GFX_TCX_DST_FIFOD2,
3909 	AMDGPU_GFX_TCX_DST_FIFOD3,
3910 	AMDGPU_GFX_TCX_DST_FIFOD4,
3911 	AMDGPU_GFX_TCX_DST_FIFOD5,
3912 	AMDGPU_GFX_TCX_DST_FIFOD6,
3913 	AMDGPU_GFX_TCX_DST_FIFOD7,
3914 	AMDGPU_GFX_TCX_DST_FIFO_ACKB0,
3915 	AMDGPU_GFX_TCX_DST_FIFO_ACKB1,
3916 	AMDGPU_GFX_TCX_DST_FIFO_ACKB2,
3917 	AMDGPU_GFX_TCX_DST_FIFO_ACKB3,
3918 	AMDGPU_GFX_TCX_DST_FIFO_ACKB4,
3919 	AMDGPU_GFX_TCX_DST_FIFO_ACKB5,
3920 	AMDGPU_GFX_TCX_DST_FIFO_ACKB6,
3921 	AMDGPU_GFX_TCX_DST_FIFO_ACKB7,
3922 	AMDGPU_GFX_TCX_DST_FIFO_ACKD0,
3923 	AMDGPU_GFX_TCX_DST_FIFO_ACKD1,
3924 	AMDGPU_GFX_TCX_DST_FIFO_ACKD2,
3925 	AMDGPU_GFX_TCX_DST_FIFO_ACKD3,
3926 	AMDGPU_GFX_TCX_DST_FIFO_ACKD4,
3927 	AMDGPU_GFX_TCX_DST_FIFO_ACKD5,
3928 	AMDGPU_GFX_TCX_DST_FIFO_ACKD6,
3929 	AMDGPU_GFX_TCX_DST_FIFO_ACKD7,
3930 };
3931 
3932 enum amdgpu_gfx_atc_l2_ras_mem_id {
3933 	AMDGPU_GFX_ATC_L2_MEM0 = 0,
3934 };
3935 
3936 enum amdgpu_gfx_utcl2_ras_mem_id {
3937 	AMDGPU_GFX_UTCL2_MEM0 = 0,
3938 };
3939 
3940 enum amdgpu_gfx_vml2_ras_mem_id {
3941 	AMDGPU_GFX_VML2_MEM0 = 0,
3942 };
3943 
3944 enum amdgpu_gfx_vml2_walker_ras_mem_id {
3945 	AMDGPU_GFX_VML2_WALKER_MEM0 = 0,
3946 };
3947 
3948 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = {
3949 	{AMDGPU_GFX_CP_MEM1, "CP_MEM1"},
3950 	{AMDGPU_GFX_CP_MEM2, "CP_MEM2"},
3951 	{AMDGPU_GFX_CP_MEM3, "CP_MEM3"},
3952 	{AMDGPU_GFX_CP_MEM4, "CP_MEM4"},
3953 	{AMDGPU_GFX_CP_MEM5, "CP_MEM5"},
3954 };
3955 
3956 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = {
3957 	{AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"},
3958 	{AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"},
3959 	{AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"},
3960 	{AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"},
3961 	{AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"},
3962 	{AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"},
3963 	{AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"},
3964 	{AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"},
3965 	{AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"},
3966 	{AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"},
3967 	{AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"},
3968 	{AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"},
3969 	{AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"},
3970 	{AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"},
3971 	{AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"},
3972 	{AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"},
3973 	{AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"},
3974 	{AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"},
3975 	{AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"},
3976 	{AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"},
3977 };
3978 
3979 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = {
3980 	{AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"},
3981 };
3982 
3983 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = {
3984 	{AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"},
3985 };
3986 
3987 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = {
3988 	{AMDGPU_GFX_GDS_MEM0, "GDS_MEM"},
3989 };
3990 
3991 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = {
3992 	{AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"},
3993 	{AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"},
3994 	{AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"},
3995 	{AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"},
3996 	{AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"},
3997 	{AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"},
3998 	{AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"},
3999 	{AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"},
4000 	{AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"},
4001 	{AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"},
4002 	{AMDGPU_GFX_LDS_BANK10, "LDS_BANK10"},
4003 	{AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"},
4004 	{AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"},
4005 	{AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"},
4006 	{AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"},
4007 	{AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"},
4008 	{AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"},
4009 	{AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"},
4010 	{AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"},
4011 	{AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"},
4012 	{AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"},
4013 	{AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"},
4014 	{AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"},
4015 	{AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"},
4016 	{AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"},
4017 	{AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"},
4018 	{AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"},
4019 	{AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"},
4020 	{AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"},
4021 	{AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"},
4022 	{AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"},
4023 	{AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"},
4024 	{AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"},
4025 	{AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"},
4026 };
4027 
4028 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = {
4029 	{AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"},
4030 	{AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"},
4031 	{AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"},
4032 	{AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"},
4033 	{AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"},
4034 	{AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"},
4035 	{AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"},
4036 	{AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"},
4037 };
4038 
4039 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = {
4040 	{AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"},
4041 };
4042 
4043 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = {
4044 	{AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"},
4045 	{AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"},
4046 	{AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"},
4047 	{AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"},
4048 };
4049 
4050 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = {
4051 	{AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"},
4052 	{AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"},
4053 	{AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"},
4054 	{AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"},
4055 	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"},
4056 	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"},
4057 	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"},
4058 	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"},
4059 	{AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"},
4060 	{AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"},
4061 	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"},
4062 	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"},
4063 	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"},
4064 	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"},
4065 	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"},
4066 	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"},
4067 	{AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"},
4068 	{AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"},
4069 	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"},
4070 	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"},
4071 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"},
4072 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"},
4073 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"},
4074 };
4075 
4076 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = {
4077 	{AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0"},
4078 	{AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"},
4079 	{AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"},
4080 	{AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"},
4081 };
4082 
4083 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = {
4084 	{AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"},
4085 	{AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"},
4086 	{AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"},
4087 	{AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"},
4088 	{AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"},
4089 };
4090 
4091 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = {
4092 	{AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"},
4093 };
4094 
4095 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = {
4096 	{AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"},
4097 };
4098 
4099 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = {
4100 	{AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"},
4101 };
4102 
4103 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = {
4104 	{AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"},
4105 	{AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"},
4106 	{AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"},
4107 	{AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"},
4108 	{AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"},
4109 	{AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"},
4110 	{AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"},
4111 	{AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"},
4112 	{AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"},
4113 	{AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM"},
4114 	{AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"},
4115 	{AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"},
4116 	{AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"},
4117 	{AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"},
4118 	{AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"},
4119 	{AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"},
4120 	{AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"},
4121 	{AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"},
4122 	{AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"},
4123 	{AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"},
4124 	{AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"},
4125 	{AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"},
4126 	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"},
4127 	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"},
4128 	{AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"},
4129 };
4130 
4131 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = {
4132 	{AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"},
4133 	{AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"},
4134 	{AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"},
4135 };
4136 
4137 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = {
4138 	{AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"},
4139 	{AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"},
4140 	{AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"},
4141 	{AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"},
4142 	{AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"},
4143 	{AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"},
4144 	{AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"},
4145 	{AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"},
4146 	{AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"},
4147 	{AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"},
4148 	{AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"},
4149 	{AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"},
4150 	{AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"},
4151 	{AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"},
4152 	{AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"},
4153 	{AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"},
4154 	{AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"},
4155 	{AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"},
4156 	{AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"},
4157 	{AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"},
4158 	{AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"},
4159 	{AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5"},
4160 	{AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"},
4161 	{AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"},
4162 	{AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"},
4163 	{AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"},
4164 	{AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"},
4165 	{AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"},
4166 	{AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"},
4167 	{AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"},
4168 	{AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"},
4169 	{AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"},
4170 	{AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"},
4171 	{AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"},
4172 	{AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"},
4173 	{AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"},
4174 	{AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"},
4175 	{AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"},
4176 	{AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"},
4177 	{AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"},
4178 	{AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"},
4179 	{AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"},
4180 	{AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"},
4181 	{AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"},
4182 	{AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"},
4183 	{AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"},
4184 	{AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"},
4185 	{AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"},
4186 	{AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"},
4187 	{AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"},
4188 	{AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"},
4189 	{AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"},
4190 	{AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"},
4191 	{AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"},
4192 	{AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"},
4193 	{AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"},
4194 	{AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"},
4195 	{AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"},
4196 	{AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"},
4197 	{AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"},
4198 	{AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"},
4199 	{AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"},
4200 	{AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"},
4201 	{AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"},
4202 	{AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"},
4203 	{AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"},
4204 	{AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"},
4205 	{AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"},
4206 	{AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"},
4207 	{AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"},
4208 	{AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"},
4209 	{AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"},
4210 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"},
4211 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"},
4212 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"},
4213 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"},
4214 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"},
4215 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"},
4216 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"},
4217 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"},
4218 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"},
4219 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"},
4220 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"},
4221 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"},
4222 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"},
4223 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"},
4224 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"},
4225 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"},
4226 };
4227 
4228 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = {
4229 	{AMDGPU_GFX_ATC_L2_MEM, "ATC_L2_MEM"},
4230 };
4231 
4232 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = {
4233 	{AMDGPU_GFX_UTCL2_MEM, "UTCL2_MEM"},
4234 };
4235 
4236 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = {
4237 	{AMDGPU_GFX_VML2_MEM, "VML2_MEM"},
4238 };
4239 
4240 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = {
4241 	{AMDGPU_GFX_VML2_WALKER_MEM, "VML2_WALKER_MEM"},
4242 };
4243 
4244 static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = {
4245 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list)
4246 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list)
4247 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list)
4248 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list)
4249 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list)
4250 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list)
4251 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list)
4252 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list)
4253 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list)
4254 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list)
4255 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list)
4256 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list)
4257 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list)
4258 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list)
4259 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list)
4260 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list)
4261 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list)
4262 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list)
4263 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list)
4264 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list)
4265 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list)
4266 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list)
4267 };
4268 
4269 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = {
4270 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH),
4271 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
4272 	    AMDGPU_GFX_RLC_MEM, 1},
4273 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI),
4274 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
4275 	    AMDGPU_GFX_CP_MEM, 1},
4276 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI),
4277 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
4278 	    AMDGPU_GFX_CP_MEM, 1},
4279 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI),
4280 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
4281 	    AMDGPU_GFX_CP_MEM, 1},
4282 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI),
4283 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
4284 	    AMDGPU_GFX_GDS_MEM, 1},
4285 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI),
4286 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
4287 	    AMDGPU_GFX_GC_CANE_MEM, 1},
4288 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI),
4289 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
4290 	    AMDGPU_GFX_SPI_MEM, 1},
4291 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI),
4292 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
4293 	    AMDGPU_GFX_SP_MEM, 4},
4294 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI),
4295 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
4296 	    AMDGPU_GFX_SP_MEM, 4},
4297 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI),
4298 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
4299 	    AMDGPU_GFX_SQ_MEM, 4},
4300 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI),
4301 	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
4302 	    AMDGPU_GFX_SQC_MEM, 4},
4303 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI),
4304 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
4305 	    AMDGPU_GFX_TCX_MEM, 1},
4306 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI),
4307 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
4308 	    AMDGPU_GFX_TCC_MEM, 1},
4309 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI),
4310 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
4311 	    AMDGPU_GFX_TA_MEM, 4},
4312 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG),
4313 	    27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
4314 	    AMDGPU_GFX_TCI_MEM, 1},
4315 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG),
4316 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
4317 	    AMDGPU_GFX_TCP_MEM, 4},
4318 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI),
4319 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
4320 	    AMDGPU_GFX_TD_MEM, 4},
4321 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI),
4322 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
4323 	    AMDGPU_GFX_GCEA_MEM, 1},
4324 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI),
4325 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
4326 	    AMDGPU_GFX_LDS_MEM, 4},
4327 };
4328 
4329 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = {
4330 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH),
4331 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
4332 	    AMDGPU_GFX_RLC_MEM, 1},
4333 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI),
4334 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
4335 	    AMDGPU_GFX_CP_MEM, 1},
4336 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI),
4337 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
4338 	    AMDGPU_GFX_CP_MEM, 1},
4339 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI),
4340 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
4341 	    AMDGPU_GFX_CP_MEM, 1},
4342 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI),
4343 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
4344 	    AMDGPU_GFX_GDS_MEM, 1},
4345 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI),
4346 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
4347 	    AMDGPU_GFX_GC_CANE_MEM, 1},
4348 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI),
4349 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
4350 	    AMDGPU_GFX_SPI_MEM, 1},
4351 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI),
4352 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
4353 	    AMDGPU_GFX_SP_MEM, 4},
4354 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI),
4355 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
4356 	    AMDGPU_GFX_SP_MEM, 4},
4357 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI),
4358 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
4359 	    AMDGPU_GFX_SQ_MEM, 4},
4360 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI),
4361 	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
4362 	    AMDGPU_GFX_SQC_MEM, 4},
4363 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI),
4364 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
4365 	    AMDGPU_GFX_TCX_MEM, 1},
4366 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI),
4367 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
4368 	    AMDGPU_GFX_TCC_MEM, 1},
4369 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI),
4370 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
4371 	    AMDGPU_GFX_TA_MEM, 4},
4372 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG),
4373 	    27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
4374 	    AMDGPU_GFX_TCI_MEM, 1},
4375 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG),
4376 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
4377 	    AMDGPU_GFX_TCP_MEM, 4},
4378 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI),
4379 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
4380 	    AMDGPU_GFX_TD_MEM, 4},
4381 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI),
4382 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"},
4383 	    AMDGPU_GFX_TCA_MEM, 1},
4384 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI),
4385 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
4386 	    AMDGPU_GFX_GCEA_MEM, 1},
4387 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI),
4388 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
4389 	    AMDGPU_GFX_LDS_MEM, 4},
4390 };
4391 
gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device * adev,void * ras_error_status,int xcc_id)4392 static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
4393 					void *ras_error_status, int xcc_id)
4394 {
4395 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
4396 	unsigned long ce_count = 0, ue_count = 0;
4397 	uint32_t i, j, k;
4398 
4399 	/* NOTE: convert xcc_id to physical XCD ID (XCD0 or XCD1) */
4400 	struct amdgpu_smuio_mcm_config_info mcm_info = {
4401 		.socket_id = adev->smuio.funcs->get_socket_id(adev),
4402 		.die_id = xcc_id & 0x01 ? 1 : 0,
4403 	};
4404 
4405 	mutex_lock(&adev->grbm_idx_mutex);
4406 
4407 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
4408 		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
4409 			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
4410 				/* no need to select if instance number is 1 */
4411 				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
4412 				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
4413 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4414 
4415 				amdgpu_ras_inst_query_ras_error_count(adev,
4416 					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
4417 					1,
4418 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent,
4419 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size,
4420 					GET_INST(GC, xcc_id),
4421 					AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
4422 					&ce_count);
4423 
4424 				amdgpu_ras_inst_query_ras_error_count(adev,
4425 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4426 					1,
4427 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
4428 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
4429 					GET_INST(GC, xcc_id),
4430 					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
4431 					&ue_count);
4432 			}
4433 		}
4434 	}
4435 
4436 	/* handle extra register entries of UE */
4437 	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
4438 		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
4439 			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
4440 				/* no need to select if instance number is 1 */
4441 				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
4442 					gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
4443 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4444 
4445 				amdgpu_ras_inst_query_ras_error_count(adev,
4446 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4447 					1,
4448 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
4449 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
4450 					GET_INST(GC, xcc_id),
4451 					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
4452 					&ue_count);
4453 			}
4454 		}
4455 	}
4456 
4457 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4458 			xcc_id);
4459 	mutex_unlock(&adev->grbm_idx_mutex);
4460 
4461 	/* the caller should make sure initialize value of
4462 	 * err_data->ue_count and err_data->ce_count
4463 	 */
4464 	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
4465 	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
4466 }
4467 
gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device * adev,void * ras_error_status,int xcc_id)4468 static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
4469 					void *ras_error_status, int xcc_id)
4470 {
4471 	uint32_t i, j, k;
4472 
4473 	mutex_lock(&adev->grbm_idx_mutex);
4474 
4475 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
4476 		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
4477 			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
4478 				/* no need to select if instance number is 1 */
4479 				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
4480 				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
4481 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4482 
4483 				amdgpu_ras_inst_reset_ras_error_count(adev,
4484 					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
4485 					1,
4486 					GET_INST(GC, xcc_id));
4487 
4488 				amdgpu_ras_inst_reset_ras_error_count(adev,
4489 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4490 					1,
4491 					GET_INST(GC, xcc_id));
4492 			}
4493 		}
4494 	}
4495 
4496 	/* handle extra register entries of UE */
4497 	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
4498 		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
4499 			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
4500 				/* no need to select if instance number is 1 */
4501 				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
4502 					gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
4503 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4504 
4505 				amdgpu_ras_inst_reset_ras_error_count(adev,
4506 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4507 					1,
4508 					GET_INST(GC, xcc_id));
4509 			}
4510 		}
4511 	}
4512 
4513 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4514 			xcc_id);
4515 	mutex_unlock(&adev->grbm_idx_mutex);
4516 }
4517 
gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device * adev,void * ras_error_status,int xcc_id)4518 static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
4519 					void *ras_error_status, int xcc_id)
4520 {
4521 	uint32_t i;
4522 	uint32_t data;
4523 
4524 	if (amdgpu_sriov_vf(adev))
4525 		return;
4526 
4527 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG);
4528 	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
4529 			     amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);
4530 
4531 	if (amdgpu_watchdog_timer.timeout_fatal_disable &&
4532 	    (amdgpu_watchdog_timer.period < 1 ||
4533 	     amdgpu_watchdog_timer.period > 0x23)) {
4534 		dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
4535 		amdgpu_watchdog_timer.period = 0x23;
4536 	}
4537 	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
4538 			     amdgpu_watchdog_timer.period);
4539 
4540 	mutex_lock(&adev->grbm_idx_mutex);
4541 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4542 		gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id);
4543 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data);
4544 	}
4545 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4546 			xcc_id);
4547 	mutex_unlock(&adev->grbm_idx_mutex);
4548 }
4549 
gfx_v9_4_3_query_ras_error_count(struct amdgpu_device * adev,void * ras_error_status)4550 static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev,
4551 					void *ras_error_status)
4552 {
4553 	amdgpu_gfx_ras_error_func(adev, ras_error_status,
4554 			gfx_v9_4_3_inst_query_ras_err_count);
4555 }
4556 
gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device * adev)4557 static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev)
4558 {
4559 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count);
4560 }
4561 
gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device * adev)4562 static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
4563 {
4564 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
4565 }
4566 
gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring * ring,uint32_t num_nop)4567 static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
4568 {
4569 	int i;
4570 
4571 	/* Header itself is a NOP packet */
4572 	if (num_nop == 1) {
4573 		amdgpu_ring_write(ring, ring->funcs->nop);
4574 		return;
4575 	}
4576 
4577 	/* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time*/
4578 	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
4579 
4580 	/* Header is at index 0, followed by num_nops - 1 NOP packet's */
4581 	for (i = 1; i < num_nop; i++)
4582 		amdgpu_ring_write(ring, ring->funcs->nop);
4583 }
4584 
gfx_v9_4_3_ip_print(void * handle,struct drm_printer * p)4585 static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p)
4586 {
4587 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4588 	uint32_t i, j, k;
4589 	uint32_t xcc_id, xcc_offset, inst_offset;
4590 	uint32_t num_xcc, reg, num_inst;
4591 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
4592 
4593 	if (!adev->gfx.ip_dump_core)
4594 		return;
4595 
4596 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4597 	drm_printf(p, "Number of Instances:%d\n", num_xcc);
4598 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4599 		xcc_offset = xcc_id * reg_count;
4600 		drm_printf(p, "\nInstance id:%d\n", xcc_id);
4601 		for (i = 0; i < reg_count; i++)
4602 			drm_printf(p, "%-50s \t 0x%08x\n",
4603 				   gc_reg_list_9_4_3[i].reg_name,
4604 				   adev->gfx.ip_dump_core[xcc_offset + i]);
4605 	}
4606 
4607 	/* print compute queue registers for all instances */
4608 	if (!adev->gfx.ip_dump_compute_queues)
4609 		return;
4610 
4611 	num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
4612 		adev->gfx.mec.num_queue_per_pipe;
4613 
4614 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
4615 	drm_printf(p, "\nnum_xcc: %d num_mec: %d num_pipe: %d num_queue: %d\n",
4616 		   num_xcc,
4617 		   adev->gfx.mec.num_mec,
4618 		   adev->gfx.mec.num_pipe_per_mec,
4619 		   adev->gfx.mec.num_queue_per_pipe);
4620 
4621 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4622 		xcc_offset = xcc_id * reg_count * num_inst;
4623 		inst_offset = 0;
4624 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
4625 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
4626 				for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
4627 					drm_printf(p,
4628 						   "\nxcc:%d mec:%d, pipe:%d, queue:%d\n",
4629 						    xcc_id, i, j, k);
4630 					for (reg = 0; reg < reg_count; reg++) {
4631 						drm_printf(p,
4632 							   "%-50s \t 0x%08x\n",
4633 							   gc_cp_reg_list_9_4_3[reg].reg_name,
4634 							   adev->gfx.ip_dump_compute_queues
4635 								[xcc_offset + inst_offset +
4636 								reg]);
4637 					}
4638 					inst_offset += reg_count;
4639 				}
4640 			}
4641 		}
4642 	}
4643 }
4644 
gfx_v9_4_3_ip_dump(void * handle)4645 static void gfx_v9_4_3_ip_dump(void *handle)
4646 {
4647 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4648 	uint32_t i, j, k;
4649 	uint32_t num_xcc, reg, num_inst;
4650 	uint32_t xcc_id, xcc_offset, inst_offset;
4651 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
4652 
4653 	if (!adev->gfx.ip_dump_core)
4654 		return;
4655 
4656 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4657 
4658 	amdgpu_gfx_off_ctrl(adev, false);
4659 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4660 		xcc_offset = xcc_id * reg_count;
4661 		for (i = 0; i < reg_count; i++)
4662 			adev->gfx.ip_dump_core[xcc_offset + i] =
4663 				RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i],
4664 								   GET_INST(GC, xcc_id)));
4665 	}
4666 	amdgpu_gfx_off_ctrl(adev, true);
4667 
4668 	/* dump compute queue registers for all instances */
4669 	if (!adev->gfx.ip_dump_compute_queues)
4670 		return;
4671 
4672 	num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
4673 		adev->gfx.mec.num_queue_per_pipe;
4674 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
4675 	amdgpu_gfx_off_ctrl(adev, false);
4676 	mutex_lock(&adev->srbm_mutex);
4677 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4678 		xcc_offset = xcc_id * reg_count * num_inst;
4679 		inst_offset = 0;
4680 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
4681 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
4682 				for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
4683 					/* ME0 is for GFX so start from 1 for CP */
4684 					soc15_grbm_select(adev, 1 + i, j, k, 0,
4685 							  GET_INST(GC, xcc_id));
4686 
4687 					for (reg = 0; reg < reg_count; reg++) {
4688 						adev->gfx.ip_dump_compute_queues
4689 							[xcc_offset +
4690 							 inst_offset + reg] =
4691 							RREG32(SOC15_REG_ENTRY_OFFSET_INST(
4692 								gc_cp_reg_list_9_4_3[reg],
4693 								GET_INST(GC, xcc_id)));
4694 					}
4695 					inst_offset += reg_count;
4696 				}
4697 			}
4698 		}
4699 	}
4700 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
4701 	mutex_unlock(&adev->srbm_mutex);
4702 	amdgpu_gfx_off_ctrl(adev, true);
4703 }
4704 
gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring * ring)4705 static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
4706 {
4707 	/* Emit the cleaner shader */
4708 	amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
4709 	amdgpu_ring_write(ring, 0);  /* RESERVED field, programmed to zero */
4710 }
4711 
4712 static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
4713 	.name = "gfx_v9_4_3",
4714 	.early_init = gfx_v9_4_3_early_init,
4715 	.late_init = gfx_v9_4_3_late_init,
4716 	.sw_init = gfx_v9_4_3_sw_init,
4717 	.sw_fini = gfx_v9_4_3_sw_fini,
4718 	.hw_init = gfx_v9_4_3_hw_init,
4719 	.hw_fini = gfx_v9_4_3_hw_fini,
4720 	.suspend = gfx_v9_4_3_suspend,
4721 	.resume = gfx_v9_4_3_resume,
4722 	.is_idle = gfx_v9_4_3_is_idle,
4723 	.wait_for_idle = gfx_v9_4_3_wait_for_idle,
4724 	.soft_reset = gfx_v9_4_3_soft_reset,
4725 	.set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
4726 	.set_powergating_state = gfx_v9_4_3_set_powergating_state,
4727 	.get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
4728 	.dump_ip_state = gfx_v9_4_3_ip_dump,
4729 	.print_ip_state = gfx_v9_4_3_ip_print,
4730 };
4731 
4732 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
4733 	.type = AMDGPU_RING_TYPE_COMPUTE,
4734 	.align_mask = 0xff,
4735 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4736 	.support_64bit_ptrs = true,
4737 	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
4738 	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
4739 	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
4740 	.emit_frame_size =
4741 		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
4742 		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
4743 		5 + /* hdp invalidate */
4744 		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
4745 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4746 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4747 		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
4748 		8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */
4749 		7 + /* gfx_v9_4_3_emit_mem_sync */
4750 		5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */
4751 		15 + /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
4752 		2, /* gfx_v9_4_3_ring_emit_cleaner_shader */
4753 	.emit_ib_size =	7, /* gfx_v9_4_3_ring_emit_ib_compute */
4754 	.emit_ib = gfx_v9_4_3_ring_emit_ib_compute,
4755 	.emit_fence = gfx_v9_4_3_ring_emit_fence,
4756 	.emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync,
4757 	.emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush,
4758 	.emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch,
4759 	.emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
4760 	.test_ring = gfx_v9_4_3_ring_test_ring,
4761 	.test_ib = gfx_v9_4_3_ring_test_ib,
4762 	.insert_nop = gfx_v9_4_3_ring_insert_nop,
4763 	.pad_ib = amdgpu_ring_generic_pad_ib,
4764 	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
4765 	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
4766 	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
4767 	.soft_recovery = gfx_v9_4_3_ring_soft_recovery,
4768 	.emit_mem_sync = gfx_v9_4_3_emit_mem_sync,
4769 	.emit_wave_limit = gfx_v9_4_3_emit_wave_limit,
4770 	.reset = gfx_v9_4_3_reset_kcq,
4771 	.emit_cleaner_shader = gfx_v9_4_3_ring_emit_cleaner_shader,
4772 	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
4773 	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
4774 };
4775 
4776 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
4777 	.type = AMDGPU_RING_TYPE_KIQ,
4778 	.align_mask = 0xff,
4779 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4780 	.support_64bit_ptrs = true,
4781 	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
4782 	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
4783 	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
4784 	.emit_frame_size =
4785 		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
4786 		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
4787 		5 + /* hdp invalidate */
4788 		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
4789 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4790 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4791 		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
4792 		8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */
4793 	.emit_ib_size =	7, /* gfx_v9_4_3_ring_emit_ib_compute */
4794 	.emit_fence = gfx_v9_4_3_ring_emit_fence_kiq,
4795 	.test_ring = gfx_v9_4_3_ring_test_ring,
4796 	.insert_nop = amdgpu_ring_insert_nop,
4797 	.pad_ib = amdgpu_ring_generic_pad_ib,
4798 	.emit_rreg = gfx_v9_4_3_ring_emit_rreg,
4799 	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
4800 	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
4801 	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
4802 };
4803 
gfx_v9_4_3_set_ring_funcs(struct amdgpu_device * adev)4804 static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
4805 {
4806 	int i, j, num_xcc;
4807 
4808 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4809 	for (i = 0; i < num_xcc; i++) {
4810 		adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq;
4811 
4812 		for (j = 0; j < adev->gfx.num_compute_rings; j++)
4813 			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs
4814 					= &gfx_v9_4_3_ring_funcs_compute;
4815 	}
4816 }
4817 
4818 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = {
4819 	.set = gfx_v9_4_3_set_eop_interrupt_state,
4820 	.process = gfx_v9_4_3_eop_irq,
4821 };
4822 
4823 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
4824 	.set = gfx_v9_4_3_set_priv_reg_fault_state,
4825 	.process = gfx_v9_4_3_priv_reg_irq,
4826 };
4827 
4828 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_bad_op_irq_funcs = {
4829 	.set = gfx_v9_4_3_set_bad_op_fault_state,
4830 	.process = gfx_v9_4_3_bad_op_irq,
4831 };
4832 
4833 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
4834 	.set = gfx_v9_4_3_set_priv_inst_fault_state,
4835 	.process = gfx_v9_4_3_priv_inst_irq,
4836 };
4837 
gfx_v9_4_3_set_irq_funcs(struct amdgpu_device * adev)4838 static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
4839 {
4840 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
4841 	adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs;
4842 
4843 	adev->gfx.priv_reg_irq.num_types = 1;
4844 	adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;
4845 
4846 	adev->gfx.bad_op_irq.num_types = 1;
4847 	adev->gfx.bad_op_irq.funcs = &gfx_v9_4_3_bad_op_irq_funcs;
4848 
4849 	adev->gfx.priv_inst_irq.num_types = 1;
4850 	adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
4851 }
4852 
gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device * adev)4853 static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
4854 {
4855 	adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs;
4856 }
4857 
4858 
gfx_v9_4_3_set_gds_init(struct amdgpu_device * adev)4859 static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
4860 {
4861 	/* init asci gds info */
4862 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
4863 	case IP_VERSION(9, 4, 3):
4864 	case IP_VERSION(9, 4, 4):
4865 		/* 9.4.3 removed all the GDS internal memory,
4866 		 * only support GWS opcode in kernel, like barrier
4867 		 * semaphore.etc */
4868 		adev->gds.gds_size = 0;
4869 		break;
4870 	default:
4871 		adev->gds.gds_size = 0x10000;
4872 		break;
4873 	}
4874 
4875 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
4876 	case IP_VERSION(9, 4, 3):
4877 	case IP_VERSION(9, 4, 4):
4878 		/* deprecated for 9.4.3, no usage at all */
4879 		adev->gds.gds_compute_max_wave_id = 0;
4880 		break;
4881 	default:
4882 		/* this really depends on the chip */
4883 		adev->gds.gds_compute_max_wave_id = 0x7ff;
4884 		break;
4885 	}
4886 
4887 	adev->gds.gws_size = 64;
4888 	adev->gds.oa_size = 16;
4889 }
4890 
gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device * adev,u32 bitmap,int xcc_id)4891 static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
4892 						 u32 bitmap, int xcc_id)
4893 {
4894 	u32 data;
4895 
4896 	if (!bitmap)
4897 		return;
4898 
4899 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4900 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4901 
4902 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
4903 }
4904 
gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device * adev,int xcc_id)4905 static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id)
4906 {
4907 	u32 data, mask;
4908 
4909 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
4910 	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
4911 
4912 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4913 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4914 
4915 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
4916 
4917 	return (~data) & mask;
4918 }
4919 
gfx_v9_4_3_get_cu_info(struct amdgpu_device * adev,struct amdgpu_cu_info * cu_info)4920 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
4921 				 struct amdgpu_cu_info *cu_info)
4922 {
4923 	int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
4924 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
4925 	unsigned disable_masks[4 * 4];
4926 	bool is_symmetric_cus;
4927 
4928 	if (!adev || !cu_info)
4929 		return -EINVAL;
4930 
4931 	/*
4932 	 * 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs
4933 	 */
4934 	if (adev->gfx.config.max_shader_engines *
4935 		adev->gfx.config.max_sh_per_se > 16)
4936 		return -EINVAL;
4937 
4938 	amdgpu_gfx_parse_disable_cu(disable_masks,
4939 				    adev->gfx.config.max_shader_engines,
4940 				    adev->gfx.config.max_sh_per_se);
4941 
4942 	mutex_lock(&adev->grbm_idx_mutex);
4943 	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
4944 		is_symmetric_cus = true;
4945 		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4946 			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
4947 				mask = 1;
4948 				ao_bitmap = 0;
4949 				counter = 0;
4950 				gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
4951 				gfx_v9_4_3_set_user_cu_inactive_bitmap(
4952 					adev,
4953 					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
4954 					xcc_id);
4955 				bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);
4956 
4957 				cu_info->bitmap[xcc_id][i][j] = bitmap;
4958 
4959 				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
4960 					if (bitmap & mask) {
4961 						if (counter < adev->gfx.config.max_cu_per_sh)
4962 							ao_bitmap |= mask;
4963 						counter++;
4964 					}
4965 					mask <<= 1;
4966 				}
4967 				active_cu_number += counter;
4968 				if (i < 2 && j < 2)
4969 					ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
4970 				cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
4971 			}
4972 			if (i && is_symmetric_cus && prev_counter != counter)
4973 				is_symmetric_cus = false;
4974 			prev_counter = counter;
4975 		}
4976 		if (is_symmetric_cus) {
4977 			tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
4978 			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
4979 			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
4980 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
4981 		}
4982 		gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4983 					    xcc_id);
4984 	}
4985 	mutex_unlock(&adev->grbm_idx_mutex);
4986 
4987 	cu_info->number = active_cu_number;
4988 	cu_info->ao_cu_mask = ao_cu_mask;
4989 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
4990 
4991 	return 0;
4992 }
4993 
4994 const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
4995 	.type = AMD_IP_BLOCK_TYPE_GFX,
4996 	.major = 9,
4997 	.minor = 4,
4998 	.rev = 3,
4999 	.funcs = &gfx_v9_4_3_ip_funcs,
5000 };
5001 
gfx_v9_4_3_xcp_resume(void * handle,uint32_t inst_mask)5002 static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask)
5003 {
5004 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5005 	uint32_t tmp_mask;
5006 	int i, r;
5007 
5008 	/* TODO : Initialize golden regs */
5009 	/* gfx_v9_4_3_init_golden_registers(adev); */
5010 
5011 	tmp_mask = inst_mask;
5012 	for_each_inst(i, tmp_mask)
5013 		gfx_v9_4_3_xcc_constants_init(adev, i);
5014 
5015 	if (!amdgpu_sriov_vf(adev)) {
5016 		tmp_mask = inst_mask;
5017 		for_each_inst(i, tmp_mask) {
5018 			r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
5019 			if (r)
5020 				return r;
5021 		}
5022 	}
5023 
5024 	tmp_mask = inst_mask;
5025 	for_each_inst(i, tmp_mask) {
5026 		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
5027 		if (r)
5028 			return r;
5029 	}
5030 
5031 	return 0;
5032 }
5033 
gfx_v9_4_3_xcp_suspend(void * handle,uint32_t inst_mask)5034 static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask)
5035 {
5036 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5037 	int i;
5038 
5039 	for_each_inst(i, inst_mask)
5040 		gfx_v9_4_3_xcc_fini(adev, i);
5041 
5042 	return 0;
5043 }
5044 
5045 struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = {
5046 	.suspend = &gfx_v9_4_3_xcp_suspend,
5047 	.resume = &gfx_v9_4_3_xcp_resume
5048 };
5049 
5050 struct amdgpu_ras_block_hw_ops  gfx_v9_4_3_ras_ops = {
5051 	.query_ras_error_count = &gfx_v9_4_3_query_ras_error_count,
5052 	.reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
5053 };
5054 
gfx_v9_4_3_ras_late_init(struct amdgpu_device * adev,struct ras_common_if * ras_block)5055 static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
5056 {
5057 	int r;
5058 
5059 	r = amdgpu_ras_block_late_init(adev, ras_block);
5060 	if (r)
5061 		return r;
5062 
5063 	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX,
5064 				&gfx_v9_4_3_aca_info,
5065 				NULL);
5066 	if (r)
5067 		goto late_fini;
5068 
5069 	return 0;
5070 
5071 late_fini:
5072 	amdgpu_ras_block_late_fini(adev, ras_block);
5073 
5074 	return r;
5075 }
5076 
5077 struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
5078 	.ras_block = {
5079 		.hw_ops = &gfx_v9_4_3_ras_ops,
5080 		.ras_late_init = &gfx_v9_4_3_ras_late_init,
5081 	},
5082 	.enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
5083 };
5084