1 /*
2  * Copyright 2020 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "amdgpu.h"
24 #include "soc15.h"
25 #include "soc15d.h"
26 
27 #include "gc/gc_9_4_2_offset.h"
28 #include "gc/gc_9_4_2_sh_mask.h"
29 #include "gfx_v9_0.h"
30 
31 #include "gfx_v9_4_2.h"
32 #include "amdgpu_ras.h"
33 #include "amdgpu_gfx.h"
34 
35 #define SE_ID_MAX 8
36 #define CU_ID_MAX 16
37 #define SIMD_ID_MAX 4
38 #define WAVE_ID_MAX 10
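
/*
 * The GPR-init shaders below report into a write-back buffer with one dword
 * per (SE, CU, SIMD, wave) slot; dword 0 of that buffer is reserved for the
 * halt/continue handshake and the per-wave slots start at dword 1. The
 * helpers below simply walk the slots with a running offset, equivalent to:
 *
 *   offset = ((se * CU_ID_MAX + cu) * SIMD_ID_MAX + simd) * WAVE_ID_MAX + wave;
 */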
39 
40 enum gfx_v9_4_2_utc_type {
41 	VML2_MEM,
42 	VML2_WALKER_MEM,
43 	UTCL2_MEM,
44 	ATC_L2_CACHE_2M,
45 	ATC_L2_CACHE_32K,
46 	ATC_L2_CACHE_4K
47 };
48 
49 struct gfx_v9_4_2_utc_block {
50 	enum gfx_v9_4_2_utc_type type;
51 	uint32_t num_banks;
52 	uint32_t num_ways;
53 	uint32_t num_mem_blocks;
54 	struct soc15_reg idx_reg;
55 	struct soc15_reg data_reg;
56 	uint32_t sec_count_mask;
57 	uint32_t sec_count_shift;
58 	uint32_t ded_count_mask;
59 	uint32_t ded_count_shift;
60 	uint32_t clear;
61 };
62 
63 static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde_die_0[] = {
64 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_0, 0x3fffffff, 0x141dc920),
65 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_1, 0x3fffffff, 0x3b458b93),
66 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_2, 0x3fffffff, 0x1a4f5583),
67 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_3, 0x3fffffff, 0x317717f6),
68 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_4, 0x3fffffff, 0x107cc1e6),
69 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_5, 0x3ff, 0x351),
70 };
71 
72 static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde_die_1[] = {
73 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_0, 0x3fffffff, 0x2591aa38),
74 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_1, 0x3fffffff, 0xac9e88b),
75 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_2, 0x3fffffff, 0x2bc3369b),
76 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_3, 0x3fffffff, 0xfb74ee),
77 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_4, 0x3fffffff, 0x21f0a2fe),
78 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_5, 0x3ff, 0x49),
79 };
80 
81 static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde[] = {
82 	SOC15_REG_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
83 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
84 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_UTCL1_CNTL1, 0xffffffff, 0x30800400),
85 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCI_CNTL_3, 0xff, 0x20),
86 };
87 
88 /*
89  * This shader is used to clear VGPRs and LDS, and also to write the input
90  * pattern into the write-back buffer, which the driver uses to check
91  * whether all SIMDs have been covered.
92  */
93 static const u32 vgpr_init_compute_shader_aldebaran[] = {
94 	0xb8840904, 0xb8851a04, 0xb8861344, 0xb8831804, 0x9208ff06, 0x00000280,
95 	0x9209a805, 0x920a8a04, 0x81080908, 0x81080a08, 0x81080308, 0x8e078208,
96 	0x81078407, 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8a0000, 0xd3d94000,
97 	0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080, 0xd3d94003,
98 	0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080, 0xd3d94006,
99 	0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080, 0xd3d94009,
100 	0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080, 0xd3d9400c,
101 	0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080, 0xd3d9400f,
102 	0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080, 0xd3d94012,
103 	0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080, 0xd3d94015,
104 	0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080, 0xd3d94018,
105 	0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080, 0xd3d9401b,
106 	0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080, 0xd3d9401e,
107 	0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080, 0xd3d94021,
108 	0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080, 0xd3d94024,
109 	0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080, 0xd3d94027,
110 	0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080, 0xd3d9402a,
111 	0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080, 0xd3d9402d,
112 	0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080, 0xd3d94030,
113 	0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080, 0xd3d94033,
114 	0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080, 0xd3d94036,
115 	0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080, 0xd3d94039,
116 	0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080, 0xd3d9403c,
117 	0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080, 0xd3d9403f,
118 	0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080, 0xd3d94042,
119 	0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080, 0xd3d94045,
120 	0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080, 0xd3d94048,
121 	0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080, 0xd3d9404b,
122 	0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080, 0xd3d9404e,
123 	0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080, 0xd3d94051,
124 	0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080, 0xd3d94054,
125 	0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080, 0xd3d94057,
126 	0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080, 0xd3d9405a,
127 	0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080, 0xd3d9405d,
128 	0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080, 0xd3d94060,
129 	0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080, 0xd3d94063,
130 	0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080, 0xd3d94066,
131 	0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080, 0xd3d94069,
132 	0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080, 0xd3d9406c,
133 	0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080, 0xd3d9406f,
134 	0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080, 0xd3d94072,
135 	0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080, 0xd3d94075,
136 	0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080, 0xd3d94078,
137 	0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080, 0xd3d9407b,
138 	0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080, 0xd3d9407e,
139 	0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080, 0xd3d94081,
140 	0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080, 0xd3d94084,
141 	0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080, 0xd3d94087,
142 	0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080, 0xd3d9408a,
143 	0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080, 0xd3d9408d,
144 	0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080, 0xd3d94090,
145 	0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080, 0xd3d94093,
146 	0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080, 0xd3d94096,
147 	0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080, 0xd3d94099,
148 	0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080, 0xd3d9409c,
149 	0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080, 0xd3d9409f,
150 	0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080, 0xd3d940a2,
151 	0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080, 0xd3d940a5,
152 	0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080, 0xd3d940a8,
153 	0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080, 0xd3d940ab,
154 	0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080, 0xd3d940ae,
155 	0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080, 0xd3d940b1,
156 	0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080, 0xd3d940b4,
157 	0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080, 0xd3d940b7,
158 	0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080, 0xd3d940ba,
159 	0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080, 0xd3d940bd,
160 	0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080, 0xd3d940c0,
161 	0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080, 0xd3d940c3,
162 	0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080, 0xd3d940c6,
163 	0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080, 0xd3d940c9,
164 	0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080, 0xd3d940cc,
165 	0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080, 0xd3d940cf,
166 	0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080, 0xd3d940d2,
167 	0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080, 0xd3d940d5,
168 	0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080, 0xd3d940d8,
169 	0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080, 0xd3d940db,
170 	0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080, 0xd3d940de,
171 	0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080, 0xd3d940e1,
172 	0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080, 0xd3d940e4,
173 	0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080, 0xd3d940e7,
174 	0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080, 0xd3d940ea,
175 	0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080, 0xd3d940ed,
176 	0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080, 0xd3d940f0,
177 	0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080, 0xd3d940f3,
178 	0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080, 0xd3d940f6,
179 	0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080, 0xd3d940f9,
180 	0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080, 0xd3d940fc,
181 	0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080, 0xd3d940ff,
182 	0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a, 0x7e000280,
183 	0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280, 0x7e0c0280,
184 	0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000, 0xd28c0001,
185 	0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xbe8b0004, 0xb78b4000,
186 	0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000, 0x00020201,
187 	0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a, 0xbf84fff8,
188 	0xbf810000,
189 };
190 
191 const struct soc15_reg_entry vgpr_init_regs_aldebaran[] = {
192 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
193 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_X), 0x40 },
194 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Y), 4 },
195 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Z), 1 },
196 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC1), 0xbf },
197 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC2), 0x400006 },  /* 64KB LDS */
198 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC3), 0x3F }, /*  63 - accum-offset = 256 */
199 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
200 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
201 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
202 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
203 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
204 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
205 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
206 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
207 };
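
/*
 * With NUM_THREAD_X = 0x40 and NUM_THREAD_Y = 4, each workgroup carries 256
 * threads (four wave64s), and gfx_v9_4_2_do_vgprs_init() dispatches
 * cu_info.number workgroups, which should land roughly one wave on every
 * SIMD of every CU; the coverage check there expects
 * cu_info.number * SIMD_ID_MAX waves (wave id 0 only, mask 0b1).
 */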
208 
209 /*
210  * The shaders below are used to clear SGPRs, and also to write the input
211  * pattern into the write-back buffer. The first two dispatches must be
212  * scheduled simultaneously to make sure that all SGPRs can be allocated,
213  * so dispatch 1 needs to check the write-back buffer before it is scheduled,
214  * to make sure that the waves of dispatch 0 have been spread evenly across
215  * all SIMDs. Both dispatch 0 and dispatch 1 are halted until all waves
216  * are dispatched, and the driver then writes a pattern to the shared memory
217  * to let all waves continue.
218  */
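
/*
 * The halt described above is implemented through the first dword of the
 * write-back buffer: the shaders poll it, and gfx_v9_4_2_do_sgprs_init()
 * writes 0xdeadbeaf into wb_ib.ptr[0] once wave coverage has been verified
 * (the same write is used on error paths to terminate lingering waves).
 */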
219 static const u32 sgpr112_init_compute_shader_aldebaran[] = {
220 	0xb8840904, 0xb8851a04, 0xb8861344, 0xb8831804, 0x9208ff06, 0x00000280,
221 	0x9209a805, 0x920a8a04, 0x81080908, 0x81080a08, 0x81080308, 0x8e078208,
222 	0x81078407, 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8e003f, 0xc0030200,
223 	0x00000000, 0xbf8c0000, 0xbf06ff08, 0xdeadbeaf, 0xbf84fff9, 0x81028102,
224 	0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8a0000, 0xbefc0080, 0xbeea0080,
225 	0xbeeb0080, 0xbf00f280, 0xbee60080, 0xbee70080, 0xbee80080, 0xbee90080,
226 	0xbefe0080, 0xbeff0080, 0xbe880080, 0xbe890080, 0xbe8a0080, 0xbe8b0080,
227 	0xbe8c0080, 0xbe8d0080, 0xbe8e0080, 0xbe8f0080, 0xbe900080, 0xbe910080,
228 	0xbe920080, 0xbe930080, 0xbe940080, 0xbe950080, 0xbe960080, 0xbe970080,
229 	0xbe980080, 0xbe990080, 0xbe9a0080, 0xbe9b0080, 0xbe9c0080, 0xbe9d0080,
230 	0xbe9e0080, 0xbe9f0080, 0xbea00080, 0xbea10080, 0xbea20080, 0xbea30080,
231 	0xbea40080, 0xbea50080, 0xbea60080, 0xbea70080, 0xbea80080, 0xbea90080,
232 	0xbeaa0080, 0xbeab0080, 0xbeac0080, 0xbead0080, 0xbeae0080, 0xbeaf0080,
233 	0xbeb00080, 0xbeb10080, 0xbeb20080, 0xbeb30080, 0xbeb40080, 0xbeb50080,
234 	0xbeb60080, 0xbeb70080, 0xbeb80080, 0xbeb90080, 0xbeba0080, 0xbebb0080,
235 	0xbebc0080, 0xbebd0080, 0xbebe0080, 0xbebf0080, 0xbec00080, 0xbec10080,
236 	0xbec20080, 0xbec30080, 0xbec40080, 0xbec50080, 0xbec60080, 0xbec70080,
237 	0xbec80080, 0xbec90080, 0xbeca0080, 0xbecb0080, 0xbecc0080, 0xbecd0080,
238 	0xbece0080, 0xbecf0080, 0xbed00080, 0xbed10080, 0xbed20080, 0xbed30080,
239 	0xbed40080, 0xbed50080, 0xbed60080, 0xbed70080, 0xbed80080, 0xbed90080,
240 	0xbeda0080, 0xbedb0080, 0xbedc0080, 0xbedd0080, 0xbede0080, 0xbedf0080,
241 	0xbee00080, 0xbee10080, 0xbee20080, 0xbee30080, 0xbee40080, 0xbee50080,
242 	0xbf810000
243 };
244 
245 const struct soc15_reg_entry sgpr112_init_regs_aldebaran[] = {
246 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
247 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_X), 0x40 },
248 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Y), 8 },
249 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Z), 1 },
250 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC1), 0x340 },
251 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC2), 0x6 },
252 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC3), 0x0 },
253 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
254 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
255 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
256 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
257 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
258 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
259 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
260 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
261 };
262 
263 static const u32 sgpr96_init_compute_shader_aldebaran[] = {
264 	0xb8840904, 0xb8851a04, 0xb8861344, 0xb8831804, 0x9208ff06, 0x00000280,
265 	0x9209a805, 0x920a8a04, 0x81080908, 0x81080a08, 0x81080308, 0x8e078208,
266 	0x81078407, 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8e003f, 0xc0030200,
267 	0x00000000, 0xbf8c0000, 0xbf06ff08, 0xdeadbeaf, 0xbf84fff9, 0x81028102,
268 	0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8a0000, 0xbefc0080, 0xbeea0080,
269 	0xbeeb0080, 0xbf00f280, 0xbee60080, 0xbee70080, 0xbee80080, 0xbee90080,
270 	0xbefe0080, 0xbeff0080, 0xbe880080, 0xbe890080, 0xbe8a0080, 0xbe8b0080,
271 	0xbe8c0080, 0xbe8d0080, 0xbe8e0080, 0xbe8f0080, 0xbe900080, 0xbe910080,
272 	0xbe920080, 0xbe930080, 0xbe940080, 0xbe950080, 0xbe960080, 0xbe970080,
273 	0xbe980080, 0xbe990080, 0xbe9a0080, 0xbe9b0080, 0xbe9c0080, 0xbe9d0080,
274 	0xbe9e0080, 0xbe9f0080, 0xbea00080, 0xbea10080, 0xbea20080, 0xbea30080,
275 	0xbea40080, 0xbea50080, 0xbea60080, 0xbea70080, 0xbea80080, 0xbea90080,
276 	0xbeaa0080, 0xbeab0080, 0xbeac0080, 0xbead0080, 0xbeae0080, 0xbeaf0080,
277 	0xbeb00080, 0xbeb10080, 0xbeb20080, 0xbeb30080, 0xbeb40080, 0xbeb50080,
278 	0xbeb60080, 0xbeb70080, 0xbeb80080, 0xbeb90080, 0xbeba0080, 0xbebb0080,
279 	0xbebc0080, 0xbebd0080, 0xbebe0080, 0xbebf0080, 0xbec00080, 0xbec10080,
280 	0xbec20080, 0xbec30080, 0xbec40080, 0xbec50080, 0xbec60080, 0xbec70080,
281 	0xbec80080, 0xbec90080, 0xbeca0080, 0xbecb0080, 0xbecc0080, 0xbecd0080,
282 	0xbece0080, 0xbecf0080, 0xbed00080, 0xbed10080, 0xbed20080, 0xbed30080,
283 	0xbed40080, 0xbed50080, 0xbed60080, 0xbed70080, 0xbed80080, 0xbed90080,
284 	0xbf810000,
285 };
286 
287 const struct soc15_reg_entry sgpr96_init_regs_aldebaran[] = {
288 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
289 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_X), 0x40 },
290 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Y), 0xc },
291 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Z), 1 },
292 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC1), 0x2c0 },
293 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC2), 0x6 },
294 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC3), 0x0 },
295 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
296 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
297 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
298 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
299 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
300 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
301 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
302 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
303 };
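
/*
 * Wave budget of the two parallel SGPR dispatches: the sgpr112 dispatch runs
 * 8 waves per workgroup (0x40 x 8 threads) and is checked against wave ids
 * 0-1 (mask 0b11, cu_info.number * SIMD_ID_MAX * 2 waves), while the sgpr96
 * dispatch runs 12 waves per workgroup (0x40 x 0xc threads) across twice as
 * many workgroups and is checked against wave ids 2-7 (mask 0b11111100,
 * cu_info.number * SIMD_ID_MAX * 6 waves); see gfx_v9_4_2_do_sgprs_init().
 */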
304 
305 /*
306  * This shader is used to clear the uninitialized SGPRs left after the above
307  * two dispatches: due to a hardware limitation, dispatch 0 cannot clear the
308  * top-hole SGPRs, so 4 waves per SIMD are needed to cover them.
309  */
310 static const u32 sgpr64_init_compute_shader_aldebaran[] = {
311 	0xb8840904, 0xb8851a04, 0xb8861344, 0xb8831804, 0x9208ff06, 0x00000280,
312 	0x9209a805, 0x920a8a04, 0x81080908, 0x81080a08, 0x81080308, 0x8e078208,
313 	0x81078407, 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8e003f, 0xc0030200,
314 	0x00000000, 0xbf8c0000, 0xbf06ff08, 0xdeadbeaf, 0xbf84fff9, 0x81028102,
315 	0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8a0000, 0xbefc0080, 0xbeea0080,
316 	0xbeeb0080, 0xbf00f280, 0xbee60080, 0xbee70080, 0xbee80080, 0xbee90080,
317 	0xbefe0080, 0xbeff0080, 0xbe880080, 0xbe890080, 0xbe8a0080, 0xbe8b0080,
318 	0xbe8c0080, 0xbe8d0080, 0xbe8e0080, 0xbe8f0080, 0xbe900080, 0xbe910080,
319 	0xbe920080, 0xbe930080, 0xbe940080, 0xbe950080, 0xbe960080, 0xbe970080,
320 	0xbe980080, 0xbe990080, 0xbe9a0080, 0xbe9b0080, 0xbe9c0080, 0xbe9d0080,
321 	0xbe9e0080, 0xbe9f0080, 0xbea00080, 0xbea10080, 0xbea20080, 0xbea30080,
322 	0xbea40080, 0xbea50080, 0xbea60080, 0xbea70080, 0xbea80080, 0xbea90080,
323 	0xbeaa0080, 0xbeab0080, 0xbeac0080, 0xbead0080, 0xbeae0080, 0xbeaf0080,
324 	0xbeb00080, 0xbeb10080, 0xbeb20080, 0xbeb30080, 0xbeb40080, 0xbeb50080,
325 	0xbeb60080, 0xbeb70080, 0xbeb80080, 0xbeb90080, 0xbf810000,
326 };
327 
328 const struct soc15_reg_entry sgpr64_init_regs_aldebaran[] = {
329 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
330 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_X), 0x40 },
331 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Y), 0x10 },
332 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Z), 1 },
333 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC1), 0x1c0 },
334 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC2), 0x6 },
335 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC3), 0x0 },
336 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
337 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
338 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
339 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
340 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
341 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
342 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
343 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
344 };
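
/*
 * The sgpr64 cleanup dispatch runs 16 waves per workgroup (0x40 x 0x10
 * threads), i.e. the 4 waves per SIMD called for above; it is checked
 * against wave ids 0-3 (mask 0b1111, cu_info.number * SIMD_ID_MAX * 4 waves).
 */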
345 
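/*
 * Build and submit a small IB that programs the supplied compute registers,
 * points COMPUTE_PGM_LO/HI at the shader embedded in the same IB, passes the
 * write-back buffer address and pattern through COMPUTE_USER_DATA_0..2, and
 * issues a DISPATCH_DIRECT of compute_dim_x x 1 x 1 workgroups.
 */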
346 static int gfx_v9_4_2_run_shader(struct amdgpu_device *adev,
347 				 struct amdgpu_ring *ring,
348 				 struct amdgpu_ib *ib,
349 				 const u32 *shader_ptr, u32 shader_size,
350 				 const struct soc15_reg_entry *init_regs, u32 regs_size,
351 				 u32 compute_dim_x, u64 wb_gpu_addr, u32 pattern,
352 				 struct dma_fence **fence_ptr)
353 {
354 	int r, i;
355 	uint32_t total_size, shader_offset;
356 	u64 gpu_addr;
357 
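	/*
	 * IB size budget: each init register needs a 3-dword SET_SH_REG
	 * packet, plus 4 dwords for COMPUTE_PGM_LO/HI, 5 for the write-back
	 * buffer packet and 5 for DISPATCH_DIRECT; the shader body is then
	 * appended at a 256-byte-aligned offset.
	 */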
358 	total_size = (regs_size * 3 + 4 + 5 + 5) * 4;
359 	total_size = ALIGN(total_size, 256);
360 	shader_offset = total_size;
361 	total_size += ALIGN(shader_size, 256);
362 
363 	/* allocate an indirect buffer to put the commands in */
364 	memset(ib, 0, sizeof(*ib));
365 	r = amdgpu_ib_get(adev, NULL, total_size,
366 					AMDGPU_IB_POOL_DIRECT, ib);
367 	if (r) {
368 		dev_err(adev->dev, "failed to get ib (%d).\n", r);
369 		return r;
370 	}
371 
372 	/* load the compute shaders */
373 	for (i = 0; i < shader_size/sizeof(u32); i++)
374 		ib->ptr[i + (shader_offset / 4)] = shader_ptr[i];
375 
376 	/* init the ib length to 0 */
377 	ib->length_dw = 0;
378 
379 	/* write the register state for the compute dispatch */
380 	for (i = 0; i < regs_size; i++) {
381 		ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
382 		ib->ptr[ib->length_dw++] = SOC15_REG_ENTRY_OFFSET(init_regs[i])
383 								- PACKET3_SET_SH_REG_START;
384 		ib->ptr[ib->length_dw++] = init_regs[i].reg_value;
385 	}
386 
387 	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
388 	gpu_addr = (ib->gpu_addr + (u64)shader_offset) >> 8;
389 	ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
390 	ib->ptr[ib->length_dw++] = SOC15_REG_OFFSET(GC, 0, regCOMPUTE_PGM_LO)
391 							- PACKET3_SET_SH_REG_START;
392 	ib->ptr[ib->length_dw++] = lower_32_bits(gpu_addr);
393 	ib->ptr[ib->length_dw++] = upper_32_bits(gpu_addr);
394 
395 	/* write the wb buffer address */
396 	ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 3);
397 	ib->ptr[ib->length_dw++] = SOC15_REG_OFFSET(GC, 0, regCOMPUTE_USER_DATA_0)
398 							- PACKET3_SET_SH_REG_START;
399 	ib->ptr[ib->length_dw++] = lower_32_bits(wb_gpu_addr);
400 	ib->ptr[ib->length_dw++] = upper_32_bits(wb_gpu_addr);
401 	ib->ptr[ib->length_dw++] = pattern;
402 
403 	/* write dispatch packet */
404 	ib->ptr[ib->length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
405 	ib->ptr[ib->length_dw++] = compute_dim_x; /* x */
406 	ib->ptr[ib->length_dw++] = 1; /* y */
407 	ib->ptr[ib->length_dw++] = 1; /* z */
408 	ib->ptr[ib->length_dw++] =
409 		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
410 
411 	/* schedule the ib on the ring */
412 	r = amdgpu_ib_schedule(ring, 1, ib, NULL, fence_ptr);
413 	if (r) {
414 		dev_err(adev->dev, "ib submit failed (%d).\n", r);
415 		amdgpu_ib_free(adev, ib, NULL);
416 	}
417 	return r;
418 }
419 
420 static void gfx_v9_4_2_log_wave_assignment(struct amdgpu_device *adev, uint32_t *wb_ptr)
421 {
422 	uint32_t se, cu, simd, wave;
423 	uint32_t offset = 0;
424 	char *str;
425 	int size;
426 
427 	str = kmalloc(256, GFP_KERNEL);
428 	if (!str)
429 		return;
430 
431 	dev_dbg(adev->dev, "wave assignment:\n");
432 
433 	for (se = 0; se < adev->gfx.config.max_shader_engines; se++) {
434 		for (cu = 0; cu < CU_ID_MAX; cu++) {
435 			memset(str, 0, 256);
436 			size = sprintf(str, "SE[%02d]CU[%02d]: ", se, cu);
437 			for (simd = 0; simd < SIMD_ID_MAX; simd++) {
438 				size += sprintf(str + size, "[");
439 				for (wave = 0; wave < WAVE_ID_MAX; wave++) {
440 					size += sprintf(str + size, "%x", wb_ptr[offset]);
441 					offset++;
442 				}
443 				size += sprintf(str + size, "]  ");
444 			}
445 			dev_dbg(adev->dev, "%s\n", str);
446 		}
447 	}
448 
449 	kfree(str);
450 }
451 
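/*
 * Poll the write-back buffer until num_wave slots (restricted to the wave
 * ids selected by mask) contain the expected pattern; when wait is true,
 * retry for up to roughly two seconds before reporting the actual vs.
 * expected wave count and dumping the assignment.
 */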
452 static int gfx_v9_4_2_wait_for_waves_assigned(struct amdgpu_device *adev,
453 					      uint32_t *wb_ptr, uint32_t mask,
454 					      uint32_t pattern, uint32_t num_wave, bool wait)
455 {
456 	uint32_t se, cu, simd, wave;
457 	uint32_t loop = 0;
458 	uint32_t wave_cnt;
459 	uint32_t offset;
460 
461 	do {
462 		wave_cnt = 0;
463 		offset = 0;
464 
465 		for (se = 0; se < adev->gfx.config.max_shader_engines; se++)
466 			for (cu = 0; cu < CU_ID_MAX; cu++)
467 				for (simd = 0; simd < SIMD_ID_MAX; simd++)
468 					for (wave = 0; wave < WAVE_ID_MAX; wave++) {
469 						if (((1 << wave) & mask) &&
470 						    (wb_ptr[offset] == pattern))
471 							wave_cnt++;
472 
473 						offset++;
474 					}
475 
476 		if (wave_cnt == num_wave)
477 			return 0;
478 
479 		mdelay(1);
480 	} while (++loop < 2000 && wait);
481 
482 	dev_err(adev->dev, "actual wave num: %d, expected wave num: %d\n",
483 		wave_cnt, num_wave);
484 
485 	gfx_v9_4_2_log_wave_assignment(adev, wb_ptr);
486 
487 	return -EBADSLT;
488 }
489 
490 static int gfx_v9_4_2_do_sgprs_init(struct amdgpu_device *adev)
491 {
492 	int r;
493 	int wb_size = adev->gfx.config.max_shader_engines *
494 			 CU_ID_MAX * SIMD_ID_MAX * WAVE_ID_MAX;
495 	struct amdgpu_ib wb_ib;
496 	struct amdgpu_ib disp_ibs[3];
497 	struct dma_fence *fences[3];
498 	u32 pattern[3] = { 0x1, 0x5, 0xa };
499 
500 	/* bail if the compute ring is not ready */
501 	if (!adev->gfx.compute_ring[0].sched.ready ||
502 		 !adev->gfx.compute_ring[1].sched.ready)
503 		return 0;
504 
505 	/* allocate the write-back buffer from IB */
506 	memset(&wb_ib, 0, sizeof(wb_ib));
507 	r = amdgpu_ib_get(adev, NULL, (1 + wb_size) * sizeof(uint32_t),
508 			  AMDGPU_IB_POOL_DIRECT, &wb_ib);
509 	if (r) {
510 		dev_err(adev->dev, "failed to get ib (%d) for wb\n", r);
511 		return r;
512 	}
513 	memset(wb_ib.ptr, 0, (1 + wb_size) * sizeof(uint32_t));
514 
515 	r = gfx_v9_4_2_run_shader(adev,
516 			&adev->gfx.compute_ring[0],
517 			&disp_ibs[0],
518 			sgpr112_init_compute_shader_aldebaran,
519 			sizeof(sgpr112_init_compute_shader_aldebaran),
520 			sgpr112_init_regs_aldebaran,
521 			ARRAY_SIZE(sgpr112_init_regs_aldebaran),
522 			adev->gfx.cu_info.number,
523 			wb_ib.gpu_addr, pattern[0], &fences[0]);
524 	if (r) {
525 		dev_err(adev->dev, "failed to clear first 224 sgprs\n");
526 		goto pro_end;
527 	}
528 
529 	r = gfx_v9_4_2_wait_for_waves_assigned(adev,
530 			&wb_ib.ptr[1], 0b11,
531 			pattern[0],
532 			adev->gfx.cu_info.number * SIMD_ID_MAX * 2,
533 			true);
534 	if (r) {
535 		dev_err(adev->dev, "wave coverage failed when clear first 224 sgprs\n");
536 		wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */
537 		goto disp0_failed;
538 	}
539 
540 	r = gfx_v9_4_2_run_shader(adev,
541 			&adev->gfx.compute_ring[1],
542 			&disp_ibs[1],
543 			sgpr96_init_compute_shader_aldebaran,
544 			sizeof(sgpr96_init_compute_shader_aldebaran),
545 			sgpr96_init_regs_aldebaran,
546 			ARRAY_SIZE(sgpr96_init_regs_aldebaran),
547 			adev->gfx.cu_info.number * 2,
548 			wb_ib.gpu_addr, pattern[1], &fences[1]);
549 	if (r) {
550 		dev_err(adev->dev, "failed to clear next 576 sgprs\n");
551 		goto disp0_failed;
552 	}
553 
554 	r = gfx_v9_4_2_wait_for_waves_assigned(adev,
555 			&wb_ib.ptr[1], 0b11111100,
556 			pattern[1], adev->gfx.cu_info.number * SIMD_ID_MAX * 6,
557 			true);
558 	if (r) {
559 		dev_err(adev->dev, "wave coverage failed when clear first 576 sgprs\n");
560 		wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */
561 		goto disp1_failed;
562 	}
563 
564 	wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */
565 
566 	/* wait for the GPU to finish processing the IB */
567 	r = dma_fence_wait(fences[0], false);
568 	if (r) {
569 		dev_err(adev->dev, "timeout to clear first 224 sgprs\n");
570 		goto disp1_failed;
571 	}
572 
573 	r = dma_fence_wait(fences[1], false);
574 	if (r) {
575 		dev_err(adev->dev, "timeout to clear first 576 sgprs\n");
576 		goto disp1_failed;
577 	}
578 
579 	memset(wb_ib.ptr, 0, (1 + wb_size) * sizeof(uint32_t));
580 	r = gfx_v9_4_2_run_shader(adev,
581 			&adev->gfx.compute_ring[0],
582 			&disp_ibs[2],
583 			sgpr64_init_compute_shader_aldebaran,
584 			sizeof(sgpr64_init_compute_shader_aldebaran),
585 			sgpr64_init_regs_aldebaran,
586 			ARRAY_SIZE(sgpr64_init_regs_aldebaran),
587 			adev->gfx.cu_info.number,
588 			wb_ib.gpu_addr, pattern[2], &fences[2]);
589 	if (r) {
590 		dev_err(adev->dev, "failed to clear first 256 sgprs\n");
591 		goto disp1_failed;
592 	}
593 
594 	r = gfx_v9_4_2_wait_for_waves_assigned(adev,
595 			&wb_ib.ptr[1], 0b1111,
596 			pattern[2],
597 			adev->gfx.cu_info.number * SIMD_ID_MAX * 4,
598 			true);
599 	if (r) {
600 		dev_err(adev->dev, "wave coverage failed when clear first 256 sgprs\n");
601 		wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */
602 		goto disp2_failed;
603 	}
604 
605 	wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */
606 
607 	r = dma_fence_wait(fences[2], false);
608 	if (r) {
609 		dev_err(adev->dev, "timeout to clear first 256 sgprs\n");
610 		goto disp2_failed;
611 	}
612 
613 disp2_failed:
614 	amdgpu_ib_free(adev, &disp_ibs[2], NULL);
615 	dma_fence_put(fences[2]);
616 disp1_failed:
617 	amdgpu_ib_free(adev, &disp_ibs[1], NULL);
618 	dma_fence_put(fences[1]);
619 disp0_failed:
620 	amdgpu_ib_free(adev, &disp_ibs[0], NULL);
621 	dma_fence_put(fences[0]);
622 pro_end:
623 	amdgpu_ib_free(adev, &wb_ib, NULL);
624 
625 	if (r)
626 		dev_info(adev->dev, "Init SGPRS Failed\n");
627 	else
628 		dev_info(adev->dev, "Init SGPRS Successfully\n");
629 
630 	return r;
631 }
632 
633 static int gfx_v9_4_2_do_vgprs_init(struct amdgpu_device *adev)
634 {
635 	int r;
636 	/* CU_ID: 0~15, SIMD_ID: 0~3, WAVE_ID: 0 ~ 9 */
637 	int wb_size = adev->gfx.config.max_shader_engines *
638 			 CU_ID_MAX * SIMD_ID_MAX * WAVE_ID_MAX;
639 	struct amdgpu_ib wb_ib;
640 	struct amdgpu_ib disp_ib;
641 	struct dma_fence *fence;
642 	u32 pattern = 0xa;
643 
644 	/* bail if the compute ring is not ready */
645 	if (!adev->gfx.compute_ring[0].sched.ready)
646 		return 0;
647 
648 	/* allocate the write-back buffer from IB */
649 	memset(&wb_ib, 0, sizeof(wb_ib));
650 	r = amdgpu_ib_get(adev, NULL, (1 + wb_size) * sizeof(uint32_t),
651 			  AMDGPU_IB_POOL_DIRECT, &wb_ib);
652 	if (r) {
653 		dev_err(adev->dev, "failed to get ib (%d) for wb.\n", r);
654 		return r;
655 	}
656 	memset(wb_ib.ptr, 0, (1 + wb_size) * sizeof(uint32_t));
657 
658 	r = gfx_v9_4_2_run_shader(adev,
659 			&adev->gfx.compute_ring[0],
660 			&disp_ib,
661 			vgpr_init_compute_shader_aldebaran,
662 			sizeof(vgpr_init_compute_shader_aldebaran),
663 			vgpr_init_regs_aldebaran,
664 			ARRAY_SIZE(vgpr_init_regs_aldebaran),
665 			adev->gfx.cu_info.number,
666 			wb_ib.gpu_addr, pattern, &fence);
667 	if (r) {
668 		dev_err(adev->dev, "failed to clear vgprs\n");
669 		goto pro_end;
670 	}
671 
672 	/* wait for the GPU to finish processing the IB */
673 	r = dma_fence_wait(fence, false);
674 	if (r) {
675 		dev_err(adev->dev, "timeout to clear vgprs\n");
676 		goto disp_failed;
677 	}
678 
679 	r = gfx_v9_4_2_wait_for_waves_assigned(adev,
680 			&wb_ib.ptr[1], 0b1,
681 			pattern,
682 			adev->gfx.cu_info.number * SIMD_ID_MAX,
683 			false);
684 	if (r) {
685 		dev_err(adev->dev, "failed to cover all simds when clearing vgprs\n");
686 		goto disp_failed;
687 	}
688 
689 disp_failed:
690 	amdgpu_ib_free(adev, &disp_ib, NULL);
691 	dma_fence_put(fence);
692 pro_end:
693 	amdgpu_ib_free(adev, &wb_ib, NULL);
694 
695 	if (r)
696 		dev_info(adev->dev, "Init VGPRS Failed\n");
697 	else
698 		dev_info(adev->dev, "Init VGPRS Successfully\n");
699 
700 	return r;
701 }
702 
703 int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev)
704 {
705 	/* only support when RAS is enabled */
706 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
707 		return 0;
708 
709 	/* Workaround for ALDEBARAN: skip GPRs init during GPU reset.
710 	   Will remove it once the GPRs init algorithm works for all CU settings. */
711 	if (amdgpu_in_reset(adev))
712 		return 0;
713 
714 	gfx_v9_4_2_do_sgprs_init(adev);
715 
716 	gfx_v9_4_2_do_vgprs_init(adev);
717 
718 	return 0;
719 }
720 
721 static void gfx_v9_4_2_query_sq_timeout_status(struct amdgpu_device *adev);
722 static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev);
723 
724 void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
725 				      uint32_t die_id)
726 {
727 	soc15_program_register_sequence(adev,
728 					golden_settings_gc_9_4_2_alde,
729 					ARRAY_SIZE(golden_settings_gc_9_4_2_alde));
730 
731 	/* apply golden settings per die */
732 	switch (die_id) {
733 	case 0:
734 		soc15_program_register_sequence(adev,
735 				golden_settings_gc_9_4_2_alde_die_0,
736 				ARRAY_SIZE(golden_settings_gc_9_4_2_alde_die_0));
737 		break;
738 	case 1:
739 		soc15_program_register_sequence(adev,
740 				golden_settings_gc_9_4_2_alde_die_1,
741 				ARRAY_SIZE(golden_settings_gc_9_4_2_alde_die_1));
742 		break;
743 	default:
744 		dev_warn(adev->dev,
745 			 "invalid die id %d, ignore channel fabricid remap settings\n",
746 			 die_id);
747 		break;
748 	}
749 
750 	return;
751 }
752 
753 void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
754 				uint32_t first_vmid,
755 				uint32_t last_vmid)
756 {
757 	uint32_t data;
758 	int i;
759 
760 	mutex_lock(&adev->srbm_mutex);
761 
762 	for (i = first_vmid; i < last_vmid; i++) {
763 		data = 0;
764 		soc15_grbm_select(adev, 0, 0, 0, i);
765 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
766 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
767 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE,
768 					0);
769 		WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL), data);
770 	}
771 
772 	soc15_grbm_select(adev, 0, 0, 0, 0);
773 	mutex_unlock(&adev->srbm_mutex);
774 }
775 
776 void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev)
777 {
778 	u32 tmp;
779 
780 	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
781 
782 	tmp = 0;
783 	tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL, PATTERN_MODE, 1);
784 	WREG32_SOC15(GC, 0, regGC_THROTTLE_CTRL, tmp);
785 
786 	tmp = 0;
787 	tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL1, PWRBRK_STALL_EN, 1);
788 	WREG32_SOC15(GC, 0, regGC_THROTTLE_CTRL1, tmp);
789 
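	/*
	 * PWRBRK_STALL_PATTERN_CTRL is an indirect register: select it via
	 * GC_CAC_IND_INDEX, then write the value through GC_CAC_IND_DATA.
	 */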
790 	WREG32_SOC15(GC, 0, regGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL);
791 	tmp = 0;
792 	tmp = REG_SET_FIELD(tmp, PWRBRK_STALL_PATTERN_CTRL, PWRBRK_END_STEP, 0x12);
793 	WREG32_SOC15(GC, 0, regGC_CAC_IND_DATA, tmp);
794 }
795 
796 static const struct soc15_reg_entry gfx_v9_4_2_edc_counter_regs[] = {
797 	/* CPF */
798 	{ SOC15_REG_ENTRY(GC, 0, regCPF_EDC_ROQ_CNT), 0, 1, 1 },
799 	{ SOC15_REG_ENTRY(GC, 0, regCPF_EDC_TAG_CNT), 0, 1, 1 },
800 	/* CPC */
801 	{ SOC15_REG_ENTRY(GC, 0, regCPC_EDC_SCRATCH_CNT), 0, 1, 1 },
802 	{ SOC15_REG_ENTRY(GC, 0, regCPC_EDC_UCODE_CNT), 0, 1, 1 },
803 	{ SOC15_REG_ENTRY(GC, 0, regDC_EDC_STATE_CNT), 0, 1, 1 },
804 	{ SOC15_REG_ENTRY(GC, 0, regDC_EDC_CSINVOC_CNT), 0, 1, 1 },
805 	{ SOC15_REG_ENTRY(GC, 0, regDC_EDC_RESTORE_CNT), 0, 1, 1 },
806 	/* GDS */
807 	{ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_CNT), 0, 1, 1 },
808 	{ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_GRBM_CNT), 0, 1, 1 },
809 	{ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 1, 1 },
810 	{ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT), 0, 1, 1 },
811 	{ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT), 0, 1, 1 },
812 	/* RLC */
813 	{ SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT), 0, 1, 1 },
814 	{ SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2), 0, 1, 1 },
815 	/* SPI */
816 	{ SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT), 0, 8, 1 },
817 	/* SQC */
818 	{ SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT), 0, 8, 7 },
819 	{ SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2), 0, 8, 7 },
820 	{ SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3), 0, 8, 7 },
821 	{ SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3), 0, 8, 7 },
822 	/* SQ */
823 	{ SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT), 0, 8, 14 },
824 	/* TCP */
825 	{ SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW), 0, 8, 14 },
826 	/* TCI */
827 	{ SOC15_REG_ENTRY(GC, 0, regTCI_EDC_CNT), 0, 1, 69 },
828 	/* TCC */
829 	{ SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT), 0, 1, 16 },
830 	{ SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2), 0, 1, 16 },
831 	/* TCA */
832 	{ SOC15_REG_ENTRY(GC, 0, regTCA_EDC_CNT), 0, 1, 2 },
833 	/* TCX */
834 	{ SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT), 0, 1, 2 },
835 	{ SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2), 0, 1, 2 },
836 	/* TD */
837 	{ SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT), 0, 8, 14 },
838 	/* TA */
839 	{ SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT), 0, 8, 14 },
840 	/* GCEA */
841 	{ SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT), 0, 1, 16 },
842 	{ SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2), 0, 1, 16 },
843 	{ SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 1, 16 },
844 };
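
/*
 * For each entry above, the two trailing numbers appear to be the per-SE and
 * per-instance iteration counts (soc15_reg_entry.se_num / .instance) used
 * when walking the EDC counter registers.
 */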
845 
846 static void gfx_v9_4_2_select_se_sh(struct amdgpu_device *adev, u32 se_num,
847 				  u32 sh_num, u32 instance)
848 {
849 	u32 data;
850 
851 	if (instance == 0xffffffff)
852 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
853 				     INSTANCE_BROADCAST_WRITES, 1);
854 	else
855 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
856 				     instance);
857 
858 	if (se_num == 0xffffffff)
859 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
860 				     1);
861 	else
862 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
863 
864 	if (sh_num == 0xffffffff)
865 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES,
866 				     1);
867 	else
868 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
869 
870 	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, 0, regGRBM_GFX_INDEX, data);
871 }
872 
873 static const struct soc15_ras_field_entry gfx_v9_4_2_ras_fields[] = {
874 	/* CPF */
875 	{ "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, regCPF_EDC_ROQ_CNT),
876 	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME2),
877 	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME2) },
878 	{ "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, regCPF_EDC_ROQ_CNT),
879 	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME1),
880 	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME1) },
881 	{ "CPF_TCIU_TAG", SOC15_REG_ENTRY(GC, 0, regCPF_EDC_TAG_CNT),
882 	  SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
883 	  SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) },
884 
885 	/* CPC */
886 	{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, regCPC_EDC_SCRATCH_CNT),
887 	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
888 	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) },
889 	{ "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, regCPC_EDC_UCODE_CNT),
890 	  SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
891 	  SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) },
892 	{ "CPC_DC_STATE_RAM_ME1", SOC15_REG_ENTRY(GC, 0, regDC_EDC_STATE_CNT),
893 	  SOC15_REG_FIELD(DC_EDC_STATE_CNT, SEC_COUNT_ME1),
894 	  SOC15_REG_FIELD(DC_EDC_STATE_CNT, DED_COUNT_ME1) },
895 	{ "CPC_DC_CSINVOC_RAM_ME1",
896 	  SOC15_REG_ENTRY(GC, 0, regDC_EDC_CSINVOC_CNT),
897 	  SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT_ME1),
898 	  SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT_ME1) },
899 	{ "CPC_DC_RESTORE_RAM_ME1",
900 	  SOC15_REG_ENTRY(GC, 0, regDC_EDC_RESTORE_CNT),
901 	  SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT_ME1),
902 	  SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT_ME1) },
903 	{ "CPC_DC_CSINVOC_RAM1_ME1",
904 	  SOC15_REG_ENTRY(GC, 0, regDC_EDC_CSINVOC_CNT),
905 	  SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT1_ME1),
906 	  SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT1_ME1) },
907 	{ "CPC_DC_RESTORE_RAM1_ME1",
908 	  SOC15_REG_ENTRY(GC, 0, regDC_EDC_RESTORE_CNT),
909 	  SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT1_ME1),
910 	  SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT1_ME1) },
911 
912 	/* GDS */
913 	{ "GDS_GRBM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_GRBM_CNT),
914 	  SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, SEC),
915 	  SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, DED) },
916 	{ "GDS_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_CNT),
917 	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
918 	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) },
919 	{ "GDS_PHY_CMD_RAM_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT),
920 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
921 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) },
922 	{ "GDS_PHY_DATA_RAM_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT),
923 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SEC),
924 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_DED) },
925 	{ "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT),
926 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
927 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) },
928 	{ "GDS_ME1_PIPE0_PIPE_MEM",
929 	  SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT),
930 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
931 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) },
932 	{ "GDS_ME1_PIPE1_PIPE_MEM",
933 	  SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT),
934 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
935 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) },
936 	{ "GDS_ME1_PIPE2_PIPE_MEM",
937 	  SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT),
938 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
939 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) },
940 	{ "GDS_ME1_PIPE3_PIPE_MEM",
941 	  SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT),
942 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
943 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) },
944 	{ "GDS_ME0_GFXHP3D_PIX_DED",
945 	  SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
946 	  SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_GFXHP3D_PIX_DED) },
947 	{ "GDS_ME0_GFXHP3D_VTX_DED",
948 	  SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
949 	  SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_GFXHP3D_VTX_DED) },
950 	{ "GDS_ME0_CS_DED",
951 	  SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
952 	  SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_CS_DED) },
953 	{ "GDS_ME0_GFXHP3D_GS_DED",
954 	  SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
955 	  SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_GFXHP3D_GS_DED) },
956 	{ "GDS_ME1_PIPE0_DED",
957 	  SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
958 	  SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE0_DED) },
959 	{ "GDS_ME1_PIPE1_DED",
960 	  SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
961 	  SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE1_DED) },
962 	{ "GDS_ME1_PIPE2_DED",
963 	  SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
964 	  SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE2_DED) },
965 	{ "GDS_ME1_PIPE3_DED",
966 	  SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
967 	  SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE3_DED) },
968 	{ "GDS_ME2_PIPE0_DED",
969 	  SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
970 	  SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE0_DED) },
971 	{ "GDS_ME2_PIPE1_DED",
972 	  SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
973 	  SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE1_DED) },
974 	{ "GDS_ME2_PIPE2_DED",
975 	  SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
976 	  SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE2_DED) },
977 	{ "GDS_ME2_PIPE3_DED",
978 	  SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
979 	  SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE3_DED) },
980 
981 	/* RLC */
982 	{ "RLCG_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
983 	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_SEC_COUNT),
984 	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_DED_COUNT) },
985 	{ "RLCG_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
986 	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_SEC_COUNT),
987 	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_DED_COUNT) },
988 	{ "RLCV_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
989 	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_SEC_COUNT),
990 	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_DED_COUNT) },
991 	{ "RLCV_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
992 	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_SEC_COUNT),
993 	  SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_DED_COUNT) },
994 	{ "RLC_TCTAG_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
995 	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_SEC_COUNT),
996 	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_DED_COUNT) },
997 	{ "RLC_SPM_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
998 	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_SEC_COUNT),
999 	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_DED_COUNT) },
1000 	{ "RLC_SRM_DATA_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
1001 	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_SEC_COUNT),
1002 	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_DED_COUNT) },
1003 	{ "RLC_SRM_ADDR_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
1004 	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_SEC_COUNT),
1005 	  SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_DED_COUNT) },
1006 	{ "RLC_SPM_SE0_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
1007 	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT),
1008 	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT) },
1009 	{ "RLC_SPM_SE1_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
1010 	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT),
1011 	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT) },
1012 	{ "RLC_SPM_SE2_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
1013 	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT),
1014 	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT) },
1015 	{ "RLC_SPM_SE3_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
1016 	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT),
1017 	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT) },
1018 	{ "RLC_SPM_SE4_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
1019 	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT),
1020 	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT) },
1021 	{ "RLC_SPM_SE5_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
1022 	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT),
1023 	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT) },
1024 	{ "RLC_SPM_SE6_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
1025 	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT),
1026 	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT) },
1027 	{ "RLC_SPM_SE7_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
1028 	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT),
1029 	  SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT) },
1030 
1031 	/* SPI */
1032 	{ "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT),
1033 	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SEC_COUNT),
1034 	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_DED_COUNT) },
1035 	{ "SPI_GDS_EXPREQ", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT),
1036 	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_SEC_COUNT),
1037 	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_DED_COUNT) },
1038 	{ "SPI_WB_GRANT_30", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT),
1039 	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_SEC_COUNT),
1040 	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_DED_COUNT) },
1041 	{ "SPI_LIFE_CNT", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT),
1042 	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_SEC_COUNT),
1043 	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_DED_COUNT) },
1044 
1045 	/* SQC - regSQC_EDC_CNT */
1046 	{ "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
1047 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
1048 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) },
1049 	{ "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
1050 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
1051 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) },
1052 	{ "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
1053 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
1054 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) },
1055 	{ "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
1056 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
1057 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) },
1058 	{ "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
1059 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
1060 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) },
1061 	{ "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
1062 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
1063 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) },
1064 	{ "SQC_DATA_CU3_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
1065 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_WRITE_DATA_BUF_SEC_COUNT),
1066 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_WRITE_DATA_BUF_DED_COUNT) },
1067 	{ "SQC_DATA_CU3_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
1068 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_UTCL1_LFIFO_SEC_COUNT),
1069 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_UTCL1_LFIFO_DED_COUNT) },
1070 
1071 	/* SQC - regSQC_EDC_CNT2 */
1072 	{ "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
1073 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
1074 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) },
1075 	{ "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
1076 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
1077 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) },
1078 	{ "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
1079 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
1080 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) },
1081 	{ "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
1082 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
1083 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) },
1084 	{ "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
1085 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
1086 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) },
1087 	{ "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
1088 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SEC_COUNT),
1089 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_DED_COUNT) },
1090 
1091 	/* SQC - regSQC_EDC_CNT3 */
1092 	{ "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
1093 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
1094 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) },
1095 	{ "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
1096 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
1097 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) },
1098 	{ "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
1099 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
1100 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) },
1101 	{ "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
1102 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
1103 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) },
1104 	{ "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
1105 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SEC_COUNT),
1106 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_DED_COUNT) },
1107 
1108 	/* SQC - regSQC_EDC_PARITY_CNT3 */
1109 	{ "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
1110 	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT),
1111 	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT) },
1112 	{ "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
1113 	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_SEC_COUNT),
1114 	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_DED_COUNT) },
1115 	{ "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
1116 	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_SEC_COUNT),
1117 	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_DED_COUNT) },
1118 	{ "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
1119 	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_SEC_COUNT),
1120 	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_DED_COUNT) },
1121 	{ "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
1122 	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT),
1123 	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT) },
1124 	{ "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
1125 	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_SEC_COUNT),
1126 	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_DED_COUNT) },
1127 	{ "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
1128 	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_SEC_COUNT),
1129 	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_DED_COUNT) },
1130 	{ "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
1131 	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_SEC_COUNT),
1132 	  SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_DED_COUNT) },
1133 
1134 	/* SQ */
1135 	{ "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
1136 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
1137 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) },
1138 	{ "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
1139 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
1140 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) },
1141 	{ "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
1142 	  SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
1143 	  SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) },
1144 	{ "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
1145 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
1146 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) },
1147 	{ "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
1148 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
1149 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) },
1150 	{ "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
1151 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
1152 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) },
1153 	{ "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
1154 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
1155 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) },
1156 
1157 	/* TCP */
1158 	{ "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
1159 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
1160 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) },
1161 	{ "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
1162 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
1163 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) },
1164 	{ "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
1165 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SEC_COUNT),
1166 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_DED_COUNT) },
1167 	{ "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
1168 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
1169 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_DED_COUNT) },
1170 	{ "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
1171 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SEC_COUNT),
1172 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_DED_COUNT) },
1173 	{ "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
1174 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
1175 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) },
1176 	{ "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
1177 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
1178 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) },
1179 
1180 	/* TCI */
1181 	{ "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, regTCI_EDC_CNT),
1182 	  SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SEC_COUNT),
1183 	  SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_DED_COUNT) },
1184 
1185 	/* TCC */
1186 	{ "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
1187 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
1188 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) },
1189 	{ "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
1190 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
1191 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) },
1192 	{ "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
1193 	  SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
1194 	  SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) },
1195 	{ "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
1196 	  SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
1197 	  SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) },
1198 	{ "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
1199 	  SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
1200 	  SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) },
1201 	{ "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
1202 	  SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SEC_COUNT),
1203 	  SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_DED_COUNT) },
1204 	{ "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
1205 	  SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_SEC_COUNT),
1206 	  SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_DED_COUNT) },
1207 	{ "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
1208 	  SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SEC_COUNT),
1209 	  SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_DED_COUNT) },
1210 	{ "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
1211 	  SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_SEC_COUNT),
1212 	  SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_DED_COUNT) },
1213 	{ "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
1214 	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SEC_COUNT),
1215 	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_DED_COUNT) },
1216 	{ "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
1217 	  SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_SEC_COUNT),
1218 	  SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_DED_COUNT) },
1219 	{ "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
1220 	  SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_SEC_COUNT),
1221 	  SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_DED_COUNT) },
1222 	{ "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
1223 	  SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_SEC_COUNT),
1224 	  SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_DED_COUNT) },
1225 	{ "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
1226 	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SEC_COUNT),
1227 	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_DED_COUNT) },
1228 	{ "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
1229 	  SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_SEC_COUNT),
1230 	  SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_DED_COUNT) },
1231 
1232 	/* TCA */
1233 	{ "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, regTCA_EDC_CNT),
1234 	  SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SEC_COUNT),
1235 	  SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_DED_COUNT) },
1236 	{ "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, regTCA_EDC_CNT),
1237 	  SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SEC_COUNT),
1238 	  SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_DED_COUNT) },
1239 
1240 	/* TCX */
1241 	{ "TCX_GROUP0", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
1242 	  SOC15_REG_FIELD(TCX_EDC_CNT, GROUP0_SEC_COUNT),
1243 	  SOC15_REG_FIELD(TCX_EDC_CNT, GROUP0_DED_COUNT) },
1244 	{ "TCX_GROUP1", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
1245 	  SOC15_REG_FIELD(TCX_EDC_CNT, GROUP1_SEC_COUNT),
1246 	  SOC15_REG_FIELD(TCX_EDC_CNT, GROUP1_DED_COUNT) },
1247 	{ "TCX_GROUP2", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
1248 	  SOC15_REG_FIELD(TCX_EDC_CNT, GROUP2_SEC_COUNT),
1249 	  SOC15_REG_FIELD(TCX_EDC_CNT, GROUP2_DED_COUNT) },
1250 	{ "TCX_GROUP3", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
1251 	  SOC15_REG_FIELD(TCX_EDC_CNT, GROUP3_SEC_COUNT),
1252 	  SOC15_REG_FIELD(TCX_EDC_CNT, GROUP3_DED_COUNT) },
1253 	{ "TCX_GROUP4", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
1254 	  SOC15_REG_FIELD(TCX_EDC_CNT, GROUP4_SEC_COUNT),
1255 	  SOC15_REG_FIELD(TCX_EDC_CNT, GROUP4_DED_COUNT) },
1256 	{ "TCX_GROUP5", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
1257 	  SOC15_REG_FIELD(TCX_EDC_CNT, GROUP5_SED_COUNT), 0, 0 },
1258 	{ "TCX_GROUP6", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
1259 	  SOC15_REG_FIELD(TCX_EDC_CNT, GROUP6_SED_COUNT), 0, 0 },
1260 	{ "TCX_GROUP7", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
1261 	  SOC15_REG_FIELD(TCX_EDC_CNT, GROUP7_SED_COUNT), 0, 0 },
1262 	{ "TCX_GROUP8", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
1263 	  SOC15_REG_FIELD(TCX_EDC_CNT, GROUP8_SED_COUNT), 0, 0 },
1264 	{ "TCX_GROUP9", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
1265 	  SOC15_REG_FIELD(TCX_EDC_CNT, GROUP9_SED_COUNT), 0, 0 },
1266 	{ "TCX_GROUP10", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
1267 	  SOC15_REG_FIELD(TCX_EDC_CNT, GROUP10_SED_COUNT), 0, 0 },
1268 	{ "TCX_GROUP11", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2),
1269 	  SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP11_SED_COUNT), 0, 0 },
1270 	{ "TCX_GROUP12", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2),
1271 	  SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP12_SED_COUNT), 0, 0 },
1272 	{ "TCX_GROUP13", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2),
1273 	  SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP13_SED_COUNT), 0, 0 },
1274 	{ "TCX_GROUP14", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2),
1275 	  SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP14_SED_COUNT), 0, 0 },
1276 
1277 	/* TD */
1278 	{ "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT),
1279 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
1280 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) },
1281 	{ "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT),
1282 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
1283 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) },
1284 	{ "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT),
1285 	  SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SEC_COUNT),
1286 	  SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_DED_COUNT) },
1287 
1288 	/* TA */
1289 	{ "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
1290 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
1291 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) },
1292 	{ "TA_FS_AFIFO_LO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
1293 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_LO_SEC_COUNT),
1294 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_LO_DED_COUNT) },
1295 	{ "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
1296 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SEC_COUNT),
1297 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_DED_COUNT) },
1298 	{ "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
1299 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SEC_COUNT),
1300 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_DED_COUNT) },
1301 	{ "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
1302 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SEC_COUNT),
1303 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_DED_COUNT) },
1304 	{ "TA_FS_AFIFO_HI", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
1305 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_HI_SEC_COUNT),
1306 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_HI_DED_COUNT) },
1307 
1308 	/* EA - regGCEA_EDC_CNT */
1309 	{ "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
1310 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
1311 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) },
1312 	{ "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
1313 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
1314 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) },
1315 	{ "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
1316 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
1317 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) },
1318 	{ "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
1319 	  SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
1320 	  SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) },
1321 	{ "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
1322 	  SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
1323 	  SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) },
1324 	{ "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
1325 	  SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SEC_COUNT),
1326 	  SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_DED_COUNT) },
1327 	{ "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
1328 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0, 0 },
1329 	{ "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
1330 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0, 0 },
1331 	{ "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
1332 	  SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0, 0 },
1333 	{ "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
1334 	  SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0, 0 },
1335 
1336 	/* EA - regGCEA_EDC_CNT2 */
1337 	{ "EA_GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
1338 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
1339 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) },
1340 	{ "EA_GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
1341 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
1342 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) },
1343 	{ "EA_GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
1344 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
1345 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) },
1346 	{ "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
1347 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0, 0 },
1348 	{ "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
1349 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0, 0 },
1350 	{ "EA_MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
1351 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
1352 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_DED_COUNT) },
1353 	{ "EA_MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
1354 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
1355 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_DED_COUNT) },
1356 	{ "EA_MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
1357 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
1358 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_DED_COUNT) },
1359 	{ "EA_MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
1360 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
1361 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_DED_COUNT) },
1362 
1363 	/* EA - regGCEA_EDC_CNT3 */
1364 	{ "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
1365 	  SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT) },
1366 	{ "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
1367 	  SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT) },
1368 	{ "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
1369 	  SOC15_REG_FIELD(GCEA_EDC_CNT3, IORD_CMDMEM_DED_COUNT) },
1370 	{ "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
1371 	  SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_CMDMEM_DED_COUNT) },
1372 	{ "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
1373 	  SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT) },
1374 	{ "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
1375 	  SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT) },
1376 	{ "EA_MAM_A0MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
1377 	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_SEC_COUNT),
1378 	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_DED_COUNT) },
1379 	{ "EA_MAM_A1MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
1380 	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_SEC_COUNT),
1381 	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_DED_COUNT) },
1382 	{ "EA_MAM_A2MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
1383 	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_SEC_COUNT),
1384 	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_DED_COUNT) },
1385 	{ "EA_MAM_A3MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
1386 	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_SEC_COUNT),
1387 	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_DED_COUNT) },
1388 	{ "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
1389 	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_SEC_COUNT),
1390 	  SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_DED_COUNT) },
1391 };
1392 
1393 static const char * const vml2_walker_mems[] = {
1394 	"UTC_VML2_CACHE_PDE0_MEM0",
1395 	"UTC_VML2_CACHE_PDE0_MEM1",
1396 	"UTC_VML2_CACHE_PDE1_MEM0",
1397 	"UTC_VML2_CACHE_PDE1_MEM1",
1398 	"UTC_VML2_CACHE_PDE2_MEM0",
1399 	"UTC_VML2_CACHE_PDE2_MEM1",
1400 	"UTC_VML2_RDIF_ARADDRS",
1401 	"UTC_VML2_RDIF_LOG_FIFO",
1402 	"UTC_VML2_QUEUE_REQ",
1403 	"UTC_VML2_QUEUE_RET",
1404 };
1405 
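/*
 * UTC blocks accessed through an index/data register pair. Each block has
 * num_banks * num_ways * num_mem_blocks instances, and writing the 'clear'
 * value to the data register resets its error counters.
 */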
1406 static struct gfx_v9_4_2_utc_block gfx_v9_4_2_utc_blocks[] = {
1407 	{ VML2_MEM, 8, 2, 2,
1408 	  { SOC15_REG_ENTRY(GC, 0, regVML2_MEM_ECC_INDEX) },
1409 	  { SOC15_REG_ENTRY(GC, 0, regVML2_MEM_ECC_CNTL) },
1410 	  SOC15_REG_FIELD(VML2_MEM_ECC_CNTL, SEC_COUNT),
1411 	  SOC15_REG_FIELD(VML2_MEM_ECC_CNTL, DED_COUNT),
1412 	  REG_SET_FIELD(0, VML2_MEM_ECC_CNTL, WRITE_COUNTERS, 1) },
1413 	{ VML2_WALKER_MEM, ARRAY_SIZE(vml2_walker_mems), 1, 1,
1414 	  { SOC15_REG_ENTRY(GC, 0, regVML2_WALKER_MEM_ECC_INDEX) },
1415 	  { SOC15_REG_ENTRY(GC, 0, regVML2_WALKER_MEM_ECC_CNTL) },
1416 	  SOC15_REG_FIELD(VML2_WALKER_MEM_ECC_CNTL, SEC_COUNT),
1417 	  SOC15_REG_FIELD(VML2_WALKER_MEM_ECC_CNTL, DED_COUNT),
1418 	  REG_SET_FIELD(0, VML2_WALKER_MEM_ECC_CNTL, WRITE_COUNTERS, 1) },
1419 	{ UTCL2_MEM, 18, 1, 2,
1420 	  { SOC15_REG_ENTRY(GC, 0, regUTCL2_MEM_ECC_INDEX) },
1421 	  { SOC15_REG_ENTRY(GC, 0, regUTCL2_MEM_ECC_CNTL) },
1422 	  SOC15_REG_FIELD(UTCL2_MEM_ECC_CNTL, SEC_COUNT),
1423 	  SOC15_REG_FIELD(UTCL2_MEM_ECC_CNTL, DED_COUNT),
1424 	  REG_SET_FIELD(0, UTCL2_MEM_ECC_CNTL, WRITE_COUNTERS, 1) },
1425 	{ ATC_L2_CACHE_2M, 8, 2, 1,
1426 	  { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_2M_DSM_INDEX) },
1427 	  { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_2M_DSM_CNTL) },
1428 	  SOC15_REG_FIELD(ATC_L2_CACHE_2M_DSM_CNTL, SEC_COUNT),
1429 	  SOC15_REG_FIELD(ATC_L2_CACHE_2M_DSM_CNTL, DED_COUNT),
1430 	  REG_SET_FIELD(0, ATC_L2_CACHE_2M_DSM_CNTL, WRITE_COUNTERS, 1) },
1431 	{ ATC_L2_CACHE_32K, 8, 2, 2,
1432 	  { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_32K_DSM_INDEX) },
1433 	  { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_32K_DSM_CNTL) },
1434 	  SOC15_REG_FIELD(ATC_L2_CACHE_32K_DSM_CNTL, SEC_COUNT),
1435 	  SOC15_REG_FIELD(ATC_L2_CACHE_32K_DSM_CNTL, DED_COUNT),
1436 	  REG_SET_FIELD(0, ATC_L2_CACHE_32K_DSM_CNTL, WRITE_COUNTERS, 1) },
1437 	{ ATC_L2_CACHE_4K, 8, 2, 8,
1438 	  { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_4K_DSM_INDEX) },
1439 	  { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_4K_DSM_CNTL) },
1440 	  SOC15_REG_FIELD(ATC_L2_CACHE_4K_DSM_CNTL, SEC_COUNT),
1441 	  SOC15_REG_FIELD(ATC_L2_CACHE_4K_DSM_CNTL, DED_COUNT),
1442 	  REG_SET_FIELD(0, ATC_L2_CACHE_4K_DSM_CNTL, WRITE_COUNTERS, 1) },
1443 };
1444 
1445 static const struct soc15_reg_entry gfx_v9_4_2_ea_err_status_regs = {
1446 	SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16
1447 };
1448 
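/*
 * Match @reg against gfx_v9_4_2_ras_fields and accumulate the SEC/DED counts
 * decoded from @value; every non-zero count is logged with its SE/instance.
 */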
1449 static int gfx_v9_4_2_get_reg_error_count(struct amdgpu_device *adev,
1450 					  const struct soc15_reg_entry *reg,
1451 					  uint32_t se_id, uint32_t inst_id,
1452 					  uint32_t value, uint32_t *sec_count,
1453 					  uint32_t *ded_count)
1454 {
1455 	uint32_t i;
1456 	uint32_t sec_cnt, ded_cnt;
1457 
1458 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_2_ras_fields); i++) {
1459 		if (gfx_v9_4_2_ras_fields[i].reg_offset != reg->reg_offset ||
1460 		    gfx_v9_4_2_ras_fields[i].seg != reg->seg ||
1461 		    gfx_v9_4_2_ras_fields[i].inst != reg->inst)
1462 			continue;
1463 
1464 		sec_cnt = SOC15_RAS_REG_FIELD_VAL(
1465 			value, gfx_v9_4_2_ras_fields[i], sec);
1466 		if (sec_cnt) {
1467 			dev_info(adev->dev,
1468 				 "GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
1469 				 gfx_v9_4_2_ras_fields[i].name, se_id, inst_id,
1470 				 sec_cnt);
1471 			*sec_count += sec_cnt;
1472 		}
1473 
1474 		ded_cnt = SOC15_RAS_REG_FIELD_VAL(
1475 			value, gfx_v9_4_2_ras_fields[i], ded);
1476 		if (ded_cnt) {
1477 			dev_info(adev->dev,
1478 				 "GFX SubBlock %s, Instance[%d][%d], DED %d\n",
1479 				 gfx_v9_4_2_ras_fields[i].name, se_id, inst_id,
1480 				 ded_cnt);
1481 			*ded_count += ded_cnt;
1482 		}
1483 	}
1484 
1485 	return 0;
1486 }
1487 
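/*
 * Read the GFX EDC counter registers for every SE/instance. When either
 * counter pointer is NULL the registers are only cleared; otherwise the
 * counts are accumulated and the registers are cleared after reading.
 */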
1488 static int gfx_v9_4_2_query_sram_edc_count(struct amdgpu_device *adev,
1489 				uint32_t *sec_count, uint32_t *ded_count)
1490 {
1491 	uint32_t i, j, k, data;
1492 	uint32_t sec_cnt = 0, ded_cnt = 0;
1493 
1494 	if (sec_count && ded_count) {
1495 		*sec_count = 0;
1496 		*ded_count = 0;
1497 	}
1498 
1499 	mutex_lock(&adev->grbm_idx_mutex);
1500 
1501 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_2_edc_counter_regs); i++) {
1502 		for (j = 0; j < gfx_v9_4_2_edc_counter_regs[i].se_num; j++) {
1503 			for (k = 0; k < gfx_v9_4_2_edc_counter_regs[i].instance;
1504 			     k++) {
1505 				gfx_v9_4_2_select_se_sh(adev, j, 0, k);
1506 
1507 				/* if sec/ded_count is null, just clear counter */
1508 				if (!sec_count || !ded_count) {
1509 					WREG32(SOC15_REG_ENTRY_OFFSET(
1510 						gfx_v9_4_2_edc_counter_regs[i]), 0);
1511 					continue;
1512 				}
1513 
1514 				data = RREG32(SOC15_REG_ENTRY_OFFSET(
1515 					gfx_v9_4_2_edc_counter_regs[i]));
1516 
1517 				if (!data)
1518 					continue;
1519 
1520 				gfx_v9_4_2_get_reg_error_count(adev,
1521 					&gfx_v9_4_2_edc_counter_regs[i],
1522 					j, k, data, &sec_cnt, &ded_cnt);
1523 
1524 				/* clear counter after read */
1525 				WREG32(SOC15_REG_ENTRY_OFFSET(
1526 					gfx_v9_4_2_edc_counter_regs[i]), 0);
1527 			}
1528 		}
1529 	}
1530 
1531 	if (sec_count && ded_count) {
1532 		*sec_count += sec_cnt;
1533 		*ded_count += ded_cnt;
1534 	}
1535 
1536 	gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1537 	mutex_unlock(&adev->grbm_idx_mutex);
1538 
1539 	return 0;
1540 }
1541 
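/* Decode a UTC block instance index into bank/way/mem and log its counts. */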
1542 static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev,
1543 					 struct gfx_v9_4_2_utc_block *blk,
1544 					 uint32_t instance, uint32_t sec_cnt,
1545 					 uint32_t ded_cnt)
1546 {
1547 	uint32_t bank, way, mem;
1548 	static const char *vml2_way_str[] = { "BIGK", "4K" };
1549 	static const char *utcl2_router_str[] = { "VMC", "APT" };
1550 
1551 	mem = instance % blk->num_mem_blocks;
1552 	way = (instance / blk->num_mem_blocks) % blk->num_ways;
1553 	bank = instance / (blk->num_mem_blocks * blk->num_ways);
1554 
1555 	switch (blk->type) {
1556 	case VML2_MEM:
1557 		dev_info(
1558 			adev->dev,
1559 			"GFX SubBlock UTC_VML2_BANK_CACHE_%d_%s_MEM%d, SED %d, DED %d\n",
1560 			bank, vml2_way_str[way], mem, sec_cnt, ded_cnt);
1561 		break;
1562 	case VML2_WALKER_MEM:
1563 		dev_info(adev->dev, "GFX SubBlock %s, SED %d, DED %d\n",
1564 			 vml2_walker_mems[bank], sec_cnt, ded_cnt);
1565 		break;
1566 	case UTCL2_MEM:
1567 		dev_info(
1568 			adev->dev,
1569 			"GFX SubBlock UTCL2_ROUTER_IFIF%d_GROUP0_%s, SED %d, DED %d\n",
1570 			bank, utcl2_router_str[mem], sec_cnt, ded_cnt);
1571 		break;
1572 	case ATC_L2_CACHE_2M:
1573 		dev_info(
1574 			adev->dev,
1575 			"GFX SubBlock UTC_ATCL2_CACHE_2M_BANK%d_WAY%d_MEM, SED %d, DED %d\n",
1576 			bank, way, sec_cnt, ded_cnt);
1577 		break;
1578 	case ATC_L2_CACHE_32K:
1579 		dev_info(
1580 			adev->dev,
1581 			"GFX SubBlock UTC_ATCL2_CACHE_32K_BANK%d_WAY%d_MEM%d, SED %d, DED %d\n",
1582 			bank, way, mem, sec_cnt, ded_cnt);
1583 		break;
1584 	case ATC_L2_CACHE_4K:
1585 		dev_info(
1586 			adev->dev,
1587 			"GFX SubBlock UTC_ATCL2_CACHE_4K_BANK%d_WAY%d_MEM%d, SED %d, DED %d\n",
1588 			bank, way, mem, sec_cnt, ded_cnt);
1589 		break;
1590 	}
1591 }
1592 
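/*
 * Walk every instance of each UTC block through its index/data register
 * pair. NULL counters mean clear-only; otherwise accumulate the SEC/DED
 * counts, clear the counters and log any non-zero instance.
 */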
1593 static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev,
1594 					  uint32_t *sec_count,
1595 					  uint32_t *ded_count)
1596 {
1597 	uint32_t i, j, data;
1598 	uint32_t sec_cnt, ded_cnt;
1599 	uint32_t num_instances;
1600 	struct gfx_v9_4_2_utc_block *blk;
1601 
1602 	if (sec_count && ded_count) {
1603 		*sec_count = 0;
1604 		*ded_count = 0;
1605 	}
1606 
1607 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_2_utc_blocks); i++) {
1608 		blk = &gfx_v9_4_2_utc_blocks[i];
1609 		num_instances =
1610 			blk->num_banks * blk->num_ways * blk->num_mem_blocks;
1611 		for (j = 0; j < num_instances; j++) {
1612 			WREG32(SOC15_REG_ENTRY_OFFSET(blk->idx_reg), j);
1613 
1614 			/* if sec/ded_count is NULL, just clear counter */
1615 			if (!sec_count || !ded_count) {
1616 				WREG32(SOC15_REG_ENTRY_OFFSET(blk->data_reg),
1617 				       blk->clear);
1618 				continue;
1619 			}
1620 
1621 			data = RREG32(SOC15_REG_ENTRY_OFFSET(blk->data_reg));
1622 			if (!data)
1623 				continue;
1624 
1625 			sec_cnt = SOC15_RAS_REG_FIELD_VAL(data, *blk, sec);
1626 			*sec_count += sec_cnt;
1627 			ded_cnt = SOC15_RAS_REG_FIELD_VAL(data, *blk, ded);
1628 			*ded_count += ded_cnt;
1629 
1630 			/* clear counter after read */
1631 			WREG32(SOC15_REG_ENTRY_OFFSET(blk->data_reg),
1632 			       blk->clear);
1633 
1634 			/* print the edc count */
1635 			if (sec_cnt || ded_cnt)
1636 				gfx_v9_4_2_log_utc_edc_count(adev, blk, j, sec_cnt,
1637 							     ded_cnt);
1638 		}
1639 	}
1640 
1641 	return 0;
1642 }
1643 
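/*
 * RAS callback: SEC counts are reported as correctable errors, DED counts
 * as uncorrectable errors.
 */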
1644 static void gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
1645 					    void *ras_error_status)
1646 {
1647 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
1648 	uint32_t sec_count = 0, ded_count = 0;
1649 
1650 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
1651 		return;
1652 
1653 	err_data->ue_count = 0;
1654 	err_data->ce_count = 0;
1655 
1656 	gfx_v9_4_2_query_sram_edc_count(adev, &sec_count, &ded_count);
1657 	err_data->ce_count += sec_count;
1658 	err_data->ue_count += ded_count;
1659 
1660 	gfx_v9_4_2_query_utc_edc_count(adev, &sec_count, &ded_count);
1661 	err_data->ce_count += sec_count;
1662 	err_data->ue_count += ded_count;
1663 
1664 }
1665 
1666 static void gfx_v9_4_2_reset_utc_err_status(struct amdgpu_device *adev)
1667 {
1668 	WREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS, 0x3);
1669 	WREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS, 0x3);
1670 	WREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS, 0x3);
1671 }
1672 
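/* Set CLEAR_ERROR_STATUS in GCEA_ERR_STATUS on every EA instance. */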
1673 static void gfx_v9_4_2_reset_ea_err_status(struct amdgpu_device *adev)
1674 {
1675 	uint32_t i, j;
1676 	uint32_t value;
1677 
1678 	mutex_lock(&adev->grbm_idx_mutex);
1679 	for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) {
1680 		for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance;
1681 		     j++) {
1682 			gfx_v9_4_2_select_se_sh(adev, i, 0, j);
1683 			value = RREG32(SOC15_REG_ENTRY_OFFSET(
1684 				gfx_v9_4_2_ea_err_status_regs));
1685 			value = REG_SET_FIELD(value, GCEA_ERR_STATUS, CLEAR_ERROR_STATUS, 0x1);
1686 			WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), value);
1687 		}
1688 	}
1689 	gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1690 	mutex_unlock(&adev->grbm_idx_mutex);
1691 }
1692 
1693 static void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev)
1694 {
1695 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
1696 		return;
1697 
1698 	gfx_v9_4_2_query_sram_edc_count(adev, NULL, NULL);
1699 	gfx_v9_4_2_query_utc_edc_count(adev, NULL, NULL);
1700 }
1701 
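/* RAS callback: forward the error injection request to the PSP RAS TA. */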
1702 static int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
1703 {
1704 	struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
1705 	int ret;
1706 	struct ta_ras_trigger_error_input block_info = { 0 };
1707 
1708 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
1709 		return -EINVAL;
1710 
1711 	block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
1712 	block_info.sub_block_index = info->head.sub_block_index;
1713 	block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
1714 	block_info.address = info->address;
1715 	block_info.value = info->value;
1716 
1717 	mutex_lock(&adev->grbm_idx_mutex);
1718 	ret = psp_ras_trigger_error(&adev->psp, &block_info);
1719 	mutex_unlock(&adev->grbm_idx_mutex);
1720 
1721 	return ret;
1722 }
1723 
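/*
 * Check GCEA_ERR_STATUS on every EA instance, warn on SDP read/write
 * response or data parity errors, then clear the status.
 */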
1724 static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev)
1725 {
1726 	uint32_t i, j;
1727 	uint32_t reg_value;
1728 
1729 	mutex_lock(&adev->grbm_idx_mutex);
1730 
1731 	for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) {
1732 		for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance;
1733 		     j++) {
1734 			gfx_v9_4_2_select_se_sh(adev, i, 0, j);
1735 			reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
1736 				gfx_v9_4_2_ea_err_status_regs));
1737 
1738 			if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) ||
1739 			    REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) ||
1740 			    REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
1741 				dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n",
1742 						j, reg_value);
1743 			}
1744 			/* clear after read */
1745 			reg_value = REG_SET_FIELD(reg_value, GCEA_ERR_STATUS,
1746 						  CLEAR_ERROR_STATUS, 0x1);
1747 			WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), reg_value);
1748 		}
1749 	}
1750 
1751 	gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1752 	mutex_unlock(&adev->grbm_idx_mutex);
1753 }
1754 
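/* Warn about and clear any pending UTCL2/VML2/VML2 walker ECC status. */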
1755 static void gfx_v9_4_2_query_utc_err_status(struct amdgpu_device *adev)
1756 {
1757 	uint32_t data;
1758 
1759 	data = RREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS);
1760 	if (data) {
1761 		dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data);
1762 		WREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS, 0x3);
1763 	}
1764 
1765 	data = RREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS);
1766 	if (data) {
1767 		dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data);
1768 		WREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS, 0x3);
1769 	}
1770 
1771 	data = RREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS);
1772 	if (data) {
1773 		dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data);
1774 		WREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS, 0x3);
1775 	}
1776 }
1777 
1778 static void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev)
1779 {
1780 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
1781 		return;
1782 
1783 	gfx_v9_4_2_query_ea_err_status(adev);
1784 	gfx_v9_4_2_query_utc_err_status(adev);
1785 	gfx_v9_4_2_query_sq_timeout_status(adev);
1786 }
1787 
1788 static void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev)
1789 {
1790 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
1791 		return;
1792 
1793 	gfx_v9_4_2_reset_utc_err_status(adev);
1794 	gfx_v9_4_2_reset_ea_err_status(adev);
1795 	gfx_v9_4_2_reset_sq_timeout_status(adev);
1796 }
1797 
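/*
 * Program SQ_TIMEOUT_CONFIG on every shader engine from the
 * amdgpu_watchdog_timer module parameters; an out-of-range period is
 * clamped to 0x23 when timeout_fatal_disable is set.
 */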
1798 static void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev)
1799 {
1800 	uint32_t i;
1801 	uint32_t data;
1802 
1803 	data = REG_SET_FIELD(0, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
1804 			     amdgpu_watchdog_timer.timeout_fatal_disable ? 1 :
1805 									   0);
1806 
1807 	if (amdgpu_watchdog_timer.timeout_fatal_disable &&
1808 	    (amdgpu_watchdog_timer.period < 1 ||
1809 	     amdgpu_watchdog_timer.period > 0x23)) {
1810 		dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
1811 		amdgpu_watchdog_timer.period = 0x23;
1812 	}
1813 	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
1814 			     amdgpu_watchdog_timer.period);
1815 
1816 	mutex_lock(&adev->grbm_idx_mutex);
1817 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1818 		gfx_v9_4_2_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
1819 		WREG32_SOC15(GC, 0, regSQ_TIMEOUT_CONFIG, data);
1820 	}
1821 	gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1822 	mutex_unlock(&adev->grbm_idx_mutex);
1823 }
1824 
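/* Read an SQ indexed register for the given SIMD/wave via SQ_IND_INDEX/DATA. */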
1825 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
1826 {
1827 	WREG32_SOC15_RLC_EX(reg, GC, 0, regSQ_IND_INDEX,
1828 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1829 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1830 		(address << SQ_IND_INDEX__INDEX__SHIFT) |
1831 		(SQ_IND_INDEX__FORCE_READ_MASK));
1832 	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
1833 }
1834 
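/*
 * Dump status, PC, EXEC mask and current instruction for every wave
 * flagged in the CU timeout status bitmask.
 */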
1835 static void gfx_v9_4_2_log_cu_timeout_status(struct amdgpu_device *adev,
1836 					uint32_t status)
1837 {
1838 	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
1839 	uint32_t i, simd, wave;
1840 	uint32_t wave_status;
1841 	uint32_t wave_pc_lo, wave_pc_hi;
1842 	uint32_t wave_exec_lo, wave_exec_hi;
1843 	uint32_t wave_inst_dw0, wave_inst_dw1;
1844 	uint32_t wave_ib_sts;
1845 
1846 	for (i = 0; i < 32; i++) {
1847 		if (!((1 << i) & status))
1848 			continue;
1849 
1850 		simd = i / cu_info->max_waves_per_simd;
1851 		wave = i % cu_info->max_waves_per_simd;
1852 
1853 		wave_status = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
1854 		wave_pc_lo = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
1855 		wave_pc_hi = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
1856 		wave_exec_lo =
1857 			wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
1858 		wave_exec_hi =
1859 			wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
1860 		wave_inst_dw0 =
1861 			wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
1862 		wave_inst_dw1 =
1863 			wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
1864 		wave_ib_sts = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
1865 
1866 		dev_info(
1867 			adev->dev,
1868 			"\t SIMD %d, Wave %d: status 0x%x, pc 0x%llx, exec 0x%llx, inst 0x%llx, ib_sts 0x%x\n",
1869 			simd, wave, wave_status,
1870 			((uint64_t)wave_pc_hi << 32 | wave_pc_lo),
1871 			((uint64_t)wave_exec_hi << 32 | wave_exec_lo),
1872 			((uint64_t)wave_inst_dw1 << 32 | wave_inst_dw0),
1873 			wave_ib_sts);
1874 	}
1875 }
1876 
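/*
 * Check SQ_TIMEOUT_STATUS on every CU, dump the waves that hit the
 * watchdog timeout and clear the status afterwards.
 */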
1877 static void gfx_v9_4_2_query_sq_timeout_status(struct amdgpu_device *adev)
1878 {
1879 	uint32_t se_idx, sh_idx, cu_idx;
1880 	uint32_t status;
1881 
1882 	mutex_lock(&adev->grbm_idx_mutex);
1883 	for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines;
1884 	     se_idx++) {
1885 		for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se;
1886 		     sh_idx++) {
1887 			for (cu_idx = 0;
1888 			     cu_idx < adev->gfx.config.max_cu_per_sh;
1889 			     cu_idx++) {
1890 				gfx_v9_4_2_select_se_sh(adev, se_idx, sh_idx,
1891 							cu_idx);
1892 				status = RREG32_SOC15(GC, 0,
1893 						      regSQ_TIMEOUT_STATUS);
1894 				if (status != 0) {
1895 					dev_info(
1896 						adev->dev,
1897 						"GFX Watchdog Timeout: SE %d, SH %d, CU %d\n",
1898 						se_idx, sh_idx, cu_idx);
1899 					gfx_v9_4_2_log_cu_timeout_status(
1900 						adev, status);
1901 				}
1902 				/* clear old status */
1903 				WREG32_SOC15(GC, 0, regSQ_TIMEOUT_STATUS, 0);
1904 			}
1905 		}
1906 	}
1907 	gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1908 	mutex_unlock(&adev->grbm_idx_mutex);
1909 }
1910 
1911 static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev)
1912 {
1913 	uint32_t se_idx, sh_idx, cu_idx;
1914 
1915 	mutex_lock(&adev->grbm_idx_mutex);
1916 	for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines;
1917 	     se_idx++) {
1918 		for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se;
1919 		     sh_idx++) {
1920 			for (cu_idx = 0;
1921 			     cu_idx < adev->gfx.config.max_cu_per_sh;
1922 			     cu_idx++) {
1923 				gfx_v9_4_2_select_se_sh(adev, se_idx, sh_idx,
1924 							cu_idx);
1925 				WREG32_SOC15(GC, 0, regSQ_TIMEOUT_STATUS, 0);
1926 			}
1927 		}
1928 	}
1929 	gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1930 	mutex_unlock(&adev->grbm_idx_mutex);
1931 }
1932 
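/*
 * Return true when the GFX hub L2 protection fault status reports a
 * fatal error (FED); the fault status is cleared as a side effect.
 */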
1933 static bool gfx_v9_4_2_query_uctl2_poison_status(struct amdgpu_device *adev)
1934 {
1935 	u32 status = 0;
1936 	struct amdgpu_vmhub *hub;
1937 
1938 	hub = &adev->vmhub[AMDGPU_GFXHUB_0];
1939 	status = RREG32(hub->vm_l2_pro_fault_status);
1940 	/* reset page fault status */
1941 	WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
1942 
1943 	return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
1944 }
1945 
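/* RAS hardware ops, hooked into the RAS framework through gfx_v9_4_2_ras. */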
1946 struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops = {
1947 	.ras_error_inject = &gfx_v9_4_2_ras_error_inject,
1948 	.query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
1949 	.reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
1950 	.query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
1951 	.reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status,
1952 };
1953 
1954 struct amdgpu_gfx_ras gfx_v9_4_2_ras = {
1955 	.ras_block = {
1956 		.hw_ops = &gfx_v9_4_2_ras_ops,
1957 	},
1958 	.enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
1959 	.query_utcl2_poison_status = gfx_v9_4_2_query_uctl2_poison_status,
1960 };
1961