1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 #include <linux/slab.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_atombios.h"
31 #include "amdgpu_ih.h"
32 #include "amdgpu_uvd.h"
33 #include "amdgpu_vce.h"
34 #include "atom.h"
35 #include "amd_pcie.h"
36 #include "si_dpm.h"
37 #include "sid.h"
38 #include "si_ih.h"
39 #include "gfx_v6_0.h"
40 #include "gmc_v6_0.h"
41 #include "si_dma.h"
42 #include "dce_v6_0.h"
43 #include "si.h"
44 #include "uvd_v3_1.h"
45 #include "dce_virtual.h"
46 #include "gca/gfx_6_0_d.h"
47 #include "oss/oss_1_0_d.h"
48 #include "oss/oss_1_0_sh_mask.h"
49 #include "gmc/gmc_6_0_d.h"
50 #include "dce/dce_6_0_d.h"
51 #include "uvd/uvd_4_0_d.h"
52 #include "bif/bif_3_0_d.h"
53 #include "bif/bif_3_0_sh_mask.h"
54 
55 #include "amdgpu_dm.h"
56 
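/*
 * Golden register tables: each entry is a {register offset, AND mask,
 * OR value} triple, applied at init time via
 * amdgpu_device_program_register_sequence().
 */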
57 static const u32 tahiti_golden_registers[] =
58 {
59 	mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011,
60 	mmCB_HW_CONTROL, 0x00010000, 0x00018208,
61 	mmDB_DEBUG, 0xffffffff, 0x00000000,
62 	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
63 	mmDB_DEBUG3, 0x0002021c, 0x00020200,
64 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
65 	0x340c, 0x000000c0, 0x00800040,
66 	0x360c, 0x000000c0, 0x00800040,
67 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
68 	mmFBC_MISC, 0x00200000, 0x50100000,
69 	mmDIG0_HDMI_CONTROL, 0x31000311, 0x00000011,
70 	mmMC_ARB_WTM_CNTL_RD, 0x00000003, 0x000007ff,
71 	mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000,
72 	mmPA_CL_ENHANCE, 0xf000001f, 0x00000007,
73 	mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff,
74 	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
75 	mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000,
76 	mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x2a00126a,
77 	0x000c, 0xffffffff, 0x0040,
78 	0x000d, 0x00000040, 0x00004040,
79 	mmSPI_CONFIG_CNTL, 0x07ffffff, 0x03000000,
80 	mmSQ_DED_CNT, 0x01ff1f3f, 0x00000000,
81 	mmSQ_SEC_CNT, 0x01ff1f3f, 0x00000000,
82 	mmSX_DEBUG_1, 0x0000007f, 0x00000020,
83 	mmTA_CNTL_AUX, 0x00010000, 0x00010000,
84 	mmTCP_ADDR_CONFIG, 0x00000200, 0x000002fb,
85 	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
86 	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
87 	mmVGT_FIFO_DEPTHS, 0xffffffff, 0x000fff40,
88 	mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010,
89 	mmVM_CONTEXT0_CNTL, 0x20000000, 0x20fffed8,
90 	mmVM_L2_CG, 0x000c0fc0, 0x000c0400,
91 	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff,
92 	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
93 	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
94 	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
95 };
96 
97 static const u32 tahiti_golden_registers2[] =
98 {
99 	mmMCIF_MEM_CONTROL, 0x00000001, 0x00000001,
100 };
101 
102 static const u32 tahiti_golden_rlc_registers[] =
103 {
104 	mmGB_ADDR_CONFIG, 0xffffffff, 0x12011003,
105 	mmRLC_LB_PARAMS, 0xffffffff, 0x00601005,
106 	0x311f, 0xffffffff, 0x10104040,
107 	0x3122, 0xffffffff, 0x0100000a,
108 	mmRLC_LB_CNTR_MAX, 0xffffffff, 0x00000800,
109 	mmRLC_LB_CNTL, 0xffffffff, 0x800000f4,
110 	mmUVD_CGC_GATE, 0x00000008, 0x00000000,
111 };
112 
113 static const u32 pitcairn_golden_registers[] =
114 {
115 	mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011,
116 	mmCB_HW_CONTROL, 0x00010000, 0x00018208,
117 	mmDB_DEBUG, 0xffffffff, 0x00000000,
118 	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
119 	mmDB_DEBUG3, 0x0002021c, 0x00020200,
120 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
121 	0x340c, 0x000300c0, 0x00800040,
122 	0x360c, 0x000300c0, 0x00800040,
123 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
124 	mmFBC_MISC, 0x00200000, 0x50100000,
125 	mmDIG0_HDMI_CONTROL, 0x31000311, 0x00000011,
126 	mmMC_SEQ_PMG_PG_HWCNTL, 0x00073ffe, 0x000022a2,
127 	mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000,
128 	mmPA_CL_ENHANCE, 0xf000001f, 0x00000007,
129 	mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff,
130 	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
131 	mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000,
132 	mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x2a00126a,
133 	0x000c, 0xffffffff, 0x0040,
134 	0x000d, 0x00000040, 0x00004040,
135 	mmSPI_CONFIG_CNTL, 0x07ffffff, 0x03000000,
136 	mmSX_DEBUG_1, 0x0000007f, 0x00000020,
137 	mmTA_CNTL_AUX, 0x00010000, 0x00010000,
138 	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
139 	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
140 	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054,
141 	mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010,
142 	mmVM_L2_CG, 0x000c0fc0, 0x000c0400,
143 	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff,
144 	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
145 	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
146 	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
147 };
148 
149 static const u32 pitcairn_golden_rlc_registers[] =
150 {
151 	mmGB_ADDR_CONFIG, 0xffffffff, 0x12011003,
152 	mmRLC_LB_PARAMS, 0xffffffff, 0x00601004,
153 	0x311f, 0xffffffff, 0x10102020,
154 	0x3122, 0xffffffff, 0x01000020,
155 	mmRLC_LB_CNTR_MAX, 0xffffffff, 0x00000800,
156 	mmRLC_LB_CNTL, 0xffffffff, 0x800000a4,
157 };
158 
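/*
 * Verde power-gating init sequence: programs the GMCON power-gating
 * state machine (PGFSM) and its RENG RAM.
 */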
159 static const u32 verde_pg_init[] =
160 {
161 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x40000,
162 	mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x200010ff,
163 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
164 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
165 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
166 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
167 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
168 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x7007,
169 	mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x300010ff,
170 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
171 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
172 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
173 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
174 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
175 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x400000,
176 	mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x100010ff,
177 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
178 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
179 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
180 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
181 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
182 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x120200,
183 	mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x500010ff,
184 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
185 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
186 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
187 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
188 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
189 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x1e1e16,
190 	mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x600010ff,
191 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
192 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
193 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
194 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
195 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
196 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x171f1e,
197 	mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x700010ff,
198 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
199 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
200 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
201 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
202 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
203 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
204 	mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x9ff,
205 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x0,
206 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x10000800,
207 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xf,
208 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xf,
209 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x4,
210 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1000051e,
211 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xffff,
212 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xffff,
213 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x8,
214 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x80500,
215 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x12,
216 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x9050c,
217 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x1d,
218 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xb052c,
219 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x2a,
220 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1053e,
221 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x2d,
222 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x10546,
223 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x30,
224 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xa054e,
225 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x3c,
226 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1055f,
227 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x3f,
228 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x10567,
229 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x42,
230 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1056f,
231 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x45,
232 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x10572,
233 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x48,
234 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x20575,
235 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x4c,
236 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x190801,
237 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x67,
238 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1082a,
239 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x6a,
240 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1b082d,
241 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x87,
242 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x310851,
243 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xba,
244 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x891,
245 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xbc,
246 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x893,
247 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xbe,
248 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x20895,
249 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xc2,
250 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x20899,
251 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xc6,
252 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x2089d,
253 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xca,
254 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x8a1,
255 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xcc,
256 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x8a3,
257 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xce,
258 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x308a5,
259 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xd3,
260 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x6d08cd,
261 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x142,
262 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x2000095a,
263 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1,
264 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x144,
265 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x301f095b,
266 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x165,
267 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xc094d,
268 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x173,
269 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xf096d,
270 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x184,
271 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x15097f,
272 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x19b,
273 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xc0998,
274 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x1a9,
275 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x409a7,
276 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x1af,
277 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xcdc,
278 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x1b1,
279 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x800,
280 	mmGMCON_RENG_EXECUTE, 0xffffffff, 0x6c9b2000,
281 	mmGMCON_MISC2, 0xfc00, 0x2000,
282 	mmGMCON_MISC3, 0xffffffff, 0xfc0,
283 	mmMC_PMG_AUTO_CFG, 0x00000100, 0x100,
284 };
285 
286 static const u32 verde_golden_rlc_registers[] =
287 {
288 	mmGB_ADDR_CONFIG, 0xffffffff, 0x02010002,
289 	mmRLC_LB_PARAMS, 0xffffffff, 0x033f1005,
290 	0x311f, 0xffffffff, 0x10808020,
291 	0x3122, 0xffffffff, 0x00800008,
292 	mmRLC_LB_CNTR_MAX, 0xffffffff, 0x00001000,
293 	mmRLC_LB_CNTL, 0xffffffff, 0x80010014,
294 };
295 
296 static const u32 verde_golden_registers[] =
297 {
298 	mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011,
299 	mmCB_HW_CONTROL, 0x00010000, 0x00018208,
300 	mmDB_DEBUG, 0xffffffff, 0x00000000,
301 	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
302 	mmDB_DEBUG3, 0x0002021c, 0x00020200,
303 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
304 	0x340c, 0x000300c0, 0x00800040,
305 	0x360c, 0x000300c0, 0x00800040,
306 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
307 	mmFBC_MISC, 0x00200000, 0x50100000,
308 	mmDIG0_HDMI_CONTROL, 0x31000311, 0x00000011,
309 	mmMC_SEQ_PMG_PG_HWCNTL, 0x00073ffe, 0x000022a2,
310 	mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000,
311 	mmPA_CL_ENHANCE, 0xf000001f, 0x00000007,
312 	mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff,
313 	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
314 	mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000,
315 	mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x0000124a,
316 	0x000c, 0xffffffff, 0x0040,
317 	0x000d, 0x00000040, 0x00004040,
318 	mmSPI_CONFIG_CNTL, 0x07ffffff, 0x03000000,
319 	mmSQ_DED_CNT, 0x01ff1f3f, 0x00000000,
320 	mmSQ_SEC_CNT, 0x01ff1f3f, 0x00000000,
321 	mmSX_DEBUG_1, 0x0000007f, 0x00000020,
322 	mmTA_CNTL_AUX, 0x00010000, 0x00010000,
323 	mmTCP_ADDR_CONFIG, 0x000003ff, 0x00000003,
324 	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
325 	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001032,
326 	mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010,
327 	mmVM_L2_CG, 0x000c0fc0, 0x000c0400,
328 	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff,
329 	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
330 	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
331 	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
332 };
333 
334 static const u32 oland_golden_registers[] =
335 {
336 	mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011,
337 	mmCB_HW_CONTROL, 0x00010000, 0x00018208,
338 	mmDB_DEBUG, 0xffffffff, 0x00000000,
339 	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
340 	mmDB_DEBUG3, 0x0002021c, 0x00020200,
341 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
342 	0x340c, 0x000300c0, 0x00800040,
343 	0x360c, 0x000300c0, 0x00800040,
344 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
345 	mmFBC_MISC, 0x00200000, 0x50100000,
346 	mmDIG0_HDMI_CONTROL, 0x31000311, 0x00000011,
347 	mmMC_SEQ_PMG_PG_HWCNTL, 0x00073ffe, 0x000022a2,
348 	mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000,
349 	mmPA_CL_ENHANCE, 0xf000001f, 0x00000007,
350 	mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff,
351 	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
352 	mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000,
353 	mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x00000082,
354 	0x000c, 0xffffffff, 0x0040,
355 	0x000d, 0x00000040, 0x00004040,
356 	mmSPI_CONFIG_CNTL, 0x07ffffff, 0x03000000,
357 	mmSX_DEBUG_1, 0x0000007f, 0x00000020,
358 	mmTA_CNTL_AUX, 0x00010000, 0x00010000,
359 	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
360 	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
361 	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
362 	mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010,
363 	mmVM_L2_CG, 0x000c0fc0, 0x000c0400,
364 	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff,
365 	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
366 	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
367 	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
368 
369 };
370 
371 static const u32 oland_golden_rlc_registers[] =
372 {
373 	mmGB_ADDR_CONFIG, 0xffffffff, 0x02010002,
374 	mmRLC_LB_PARAMS, 0xffffffff, 0x00601005,
375 	0x311f, 0xffffffff, 0x10104040,
376 	0x3122, 0xffffffff, 0x0100000a,
377 	mmRLC_LB_CNTR_MAX, 0xffffffff, 0x00000800,
378 	mmRLC_LB_CNTL, 0xffffffff, 0x800000f4,
379 };
380 
381 static const u32 hainan_golden_registers[] =
382 {
383 	0x17bc, 0x00000030, 0x00000011,
384 	mmCB_HW_CONTROL, 0x00010000, 0x00018208,
385 	mmDB_DEBUG, 0xffffffff, 0x00000000,
386 	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
387 	mmDB_DEBUG3, 0x0002021c, 0x00020200,
388 	0x031e, 0x00000080, 0x00000000,
389 	0x3430, 0xff000fff, 0x00000100,
390 	0x340c, 0x000300c0, 0x00800040,
391 	0x3630, 0xff000fff, 0x00000100,
392 	0x360c, 0x000300c0, 0x00800040,
393 	0x16ec, 0x000000f0, 0x00000070,
394 	0x16f0, 0x00200000, 0x50100000,
395 	0x1c0c, 0x31000311, 0x00000011,
396 	mmMC_SEQ_PMG_PG_HWCNTL, 0x00073ffe, 0x000022a2,
397 	mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000,
398 	mmPA_CL_ENHANCE, 0xf000001f, 0x00000007,
399 	mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff,
400 	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
401 	mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000,
402 	mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x00000000,
403 	0x000c, 0xffffffff, 0x0040,
404 	0x000d, 0x00000040, 0x00004040,
405 	mmSPI_CONFIG_CNTL, 0x03e00000, 0x03600000,
406 	mmSX_DEBUG_1, 0x0000007f, 0x00000020,
407 	mmTA_CNTL_AUX, 0x00010000, 0x00010000,
408 	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
409 	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
410 	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
411 	mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010,
412 	mmVM_L2_CG, 0x000c0fc0, 0x000c0400,
413 	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff,
414 	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
415 	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
416 	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
417 };
418 
419 static const u32 hainan_golden_registers2[] =
420 {
421 	mmGB_ADDR_CONFIG, 0xffffffff, 0x2011003,
422 };
423 
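/*
 * Per-ASIC medium-grain/coarse-grain clock gating (MGCG/CGCG) init
 * tables, applied together with the golden registers above.
 */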
424 static const u32 tahiti_mgcg_cgcg_init[] =
425 {
426 	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc,
427 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
428 	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
429 	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
430 	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
431 	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
432 	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
433 	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
434 	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
435 	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
436 	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
437 	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
438 	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
439 	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
440 	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
441 	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
442 	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
443 	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
444 	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
445 	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
446 	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
447 	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
448 	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
449 	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
450 	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
451 	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
452 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
453 	0x2458, 0xffffffff, 0x00010000,
454 	0x2459, 0xffffffff, 0x00030002,
455 	0x245a, 0xffffffff, 0x00040007,
456 	0x245b, 0xffffffff, 0x00060005,
457 	0x245c, 0xffffffff, 0x00090008,
458 	0x245d, 0xffffffff, 0x00020001,
459 	0x245e, 0xffffffff, 0x00040003,
460 	0x245f, 0xffffffff, 0x00000007,
461 	0x2460, 0xffffffff, 0x00060005,
462 	0x2461, 0xffffffff, 0x00090008,
463 	0x2462, 0xffffffff, 0x00030002,
464 	0x2463, 0xffffffff, 0x00050004,
465 	0x2464, 0xffffffff, 0x00000008,
466 	0x2465, 0xffffffff, 0x00070006,
467 	0x2466, 0xffffffff, 0x000a0009,
468 	0x2467, 0xffffffff, 0x00040003,
469 	0x2468, 0xffffffff, 0x00060005,
470 	0x2469, 0xffffffff, 0x00000009,
471 	0x246a, 0xffffffff, 0x00080007,
472 	0x246b, 0xffffffff, 0x000b000a,
473 	0x246c, 0xffffffff, 0x00050004,
474 	0x246d, 0xffffffff, 0x00070006,
475 	0x246e, 0xffffffff, 0x0008000b,
476 	0x246f, 0xffffffff, 0x000a0009,
477 	0x2470, 0xffffffff, 0x000d000c,
478 	0x2471, 0xffffffff, 0x00060005,
479 	0x2472, 0xffffffff, 0x00080007,
480 	0x2473, 0xffffffff, 0x0000000b,
481 	0x2474, 0xffffffff, 0x000a0009,
482 	0x2475, 0xffffffff, 0x000d000c,
483 	0x2476, 0xffffffff, 0x00070006,
484 	0x2477, 0xffffffff, 0x00090008,
485 	0x2478, 0xffffffff, 0x0000000c,
486 	0x2479, 0xffffffff, 0x000b000a,
487 	0x247a, 0xffffffff, 0x000e000d,
488 	0x247b, 0xffffffff, 0x00080007,
489 	0x247c, 0xffffffff, 0x000a0009,
490 	0x247d, 0xffffffff, 0x0000000d,
491 	0x247e, 0xffffffff, 0x000c000b,
492 	0x247f, 0xffffffff, 0x000f000e,
493 	0x2480, 0xffffffff, 0x00090008,
494 	0x2481, 0xffffffff, 0x000b000a,
495 	0x2482, 0xffffffff, 0x000c000f,
496 	0x2483, 0xffffffff, 0x000e000d,
497 	0x2484, 0xffffffff, 0x00110010,
498 	0x2485, 0xffffffff, 0x000a0009,
499 	0x2486, 0xffffffff, 0x000c000b,
500 	0x2487, 0xffffffff, 0x0000000f,
501 	0x2488, 0xffffffff, 0x000e000d,
502 	0x2489, 0xffffffff, 0x00110010,
503 	0x248a, 0xffffffff, 0x000b000a,
504 	0x248b, 0xffffffff, 0x000d000c,
505 	0x248c, 0xffffffff, 0x00000010,
506 	0x248d, 0xffffffff, 0x000f000e,
507 	0x248e, 0xffffffff, 0x00120011,
508 	0x248f, 0xffffffff, 0x000c000b,
509 	0x2490, 0xffffffff, 0x000e000d,
510 	0x2491, 0xffffffff, 0x00000011,
511 	0x2492, 0xffffffff, 0x0010000f,
512 	0x2493, 0xffffffff, 0x00130012,
513 	0x2494, 0xffffffff, 0x000d000c,
514 	0x2495, 0xffffffff, 0x000f000e,
515 	0x2496, 0xffffffff, 0x00100013,
516 	0x2497, 0xffffffff, 0x00120011,
517 	0x2498, 0xffffffff, 0x00150014,
518 	0x2499, 0xffffffff, 0x000e000d,
519 	0x249a, 0xffffffff, 0x0010000f,
520 	0x249b, 0xffffffff, 0x00000013,
521 	0x249c, 0xffffffff, 0x00120011,
522 	0x249d, 0xffffffff, 0x00150014,
523 	0x249e, 0xffffffff, 0x000f000e,
524 	0x249f, 0xffffffff, 0x00110010,
525 	0x24a0, 0xffffffff, 0x00000014,
526 	0x24a1, 0xffffffff, 0x00130012,
527 	0x24a2, 0xffffffff, 0x00160015,
528 	0x24a3, 0xffffffff, 0x0010000f,
529 	0x24a4, 0xffffffff, 0x00120011,
530 	0x24a5, 0xffffffff, 0x00000015,
531 	0x24a6, 0xffffffff, 0x00140013,
532 	0x24a7, 0xffffffff, 0x00170016,
533 	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
534 	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
535 	mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080,
536 	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
537 	0x000c, 0xffffffff, 0x0000001c,
538 	0x000d, 0x000f0000, 0x000f0000,
539 	0x0583, 0xffffffff, 0x00000100,
540 	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
541 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
542 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
543 	mmMC_CITF_MISC_WR_CG, 0x000c0000, 0x000c0000,
544 	mmMC_CITF_MISC_RD_CG, 0x000c0000, 0x000c0000,
545 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
546 	0x157a, 0x00000001, 0x00000001,
547 	mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001,
548 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
549 	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
550 	0x3430, 0xfffffff0, 0x00000100,
551 	0x3630, 0xfffffff0, 0x00000100,
552 };
553 static const u32 pitcairn_mgcg_cgcg_init[] =
554 {
555 	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc,
556 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
557 	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
558 	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
559 	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
560 	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
561 	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
562 	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
563 	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
564 	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
565 	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
566 	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
567 	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
568 	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
569 	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
570 	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
571 	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
572 	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
573 	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
574 	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
575 	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
576 	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
577 	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
578 	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
579 	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
580 	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
581 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
582 	0x2458, 0xffffffff, 0x00010000,
583 	0x2459, 0xffffffff, 0x00030002,
584 	0x245a, 0xffffffff, 0x00040007,
585 	0x245b, 0xffffffff, 0x00060005,
586 	0x245c, 0xffffffff, 0x00090008,
587 	0x245d, 0xffffffff, 0x00020001,
588 	0x245e, 0xffffffff, 0x00040003,
589 	0x245f, 0xffffffff, 0x00000007,
590 	0x2460, 0xffffffff, 0x00060005,
591 	0x2461, 0xffffffff, 0x00090008,
592 	0x2462, 0xffffffff, 0x00030002,
593 	0x2463, 0xffffffff, 0x00050004,
594 	0x2464, 0xffffffff, 0x00000008,
595 	0x2465, 0xffffffff, 0x00070006,
596 	0x2466, 0xffffffff, 0x000a0009,
597 	0x2467, 0xffffffff, 0x00040003,
598 	0x2468, 0xffffffff, 0x00060005,
599 	0x2469, 0xffffffff, 0x00000009,
600 	0x246a, 0xffffffff, 0x00080007,
601 	0x246b, 0xffffffff, 0x000b000a,
602 	0x246c, 0xffffffff, 0x00050004,
603 	0x246d, 0xffffffff, 0x00070006,
604 	0x246e, 0xffffffff, 0x0008000b,
605 	0x246f, 0xffffffff, 0x000a0009,
606 	0x2470, 0xffffffff, 0x000d000c,
607 	0x2480, 0xffffffff, 0x00090008,
608 	0x2481, 0xffffffff, 0x000b000a,
609 	0x2482, 0xffffffff, 0x000c000f,
610 	0x2483, 0xffffffff, 0x000e000d,
611 	0x2484, 0xffffffff, 0x00110010,
612 	0x2485, 0xffffffff, 0x000a0009,
613 	0x2486, 0xffffffff, 0x000c000b,
614 	0x2487, 0xffffffff, 0x0000000f,
615 	0x2488, 0xffffffff, 0x000e000d,
616 	0x2489, 0xffffffff, 0x00110010,
617 	0x248a, 0xffffffff, 0x000b000a,
618 	0x248b, 0xffffffff, 0x000d000c,
619 	0x248c, 0xffffffff, 0x00000010,
620 	0x248d, 0xffffffff, 0x000f000e,
621 	0x248e, 0xffffffff, 0x00120011,
622 	0x248f, 0xffffffff, 0x000c000b,
623 	0x2490, 0xffffffff, 0x000e000d,
624 	0x2491, 0xffffffff, 0x00000011,
625 	0x2492, 0xffffffff, 0x0010000f,
626 	0x2493, 0xffffffff, 0x00130012,
627 	0x2494, 0xffffffff, 0x000d000c,
628 	0x2495, 0xffffffff, 0x000f000e,
629 	0x2496, 0xffffffff, 0x00100013,
630 	0x2497, 0xffffffff, 0x00120011,
631 	0x2498, 0xffffffff, 0x00150014,
632 	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
633 	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
634 	mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080,
635 	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
636 	0x000c, 0xffffffff, 0x0000001c,
637 	0x000d, 0x000f0000, 0x000f0000,
638 	0x0583, 0xffffffff, 0x00000100,
639 	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
640 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
641 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
642 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
643 	0x157a, 0x00000001, 0x00000001,
644 	mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001,
645 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
646 	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
647 	0x3430, 0xfffffff0, 0x00000100,
648 	0x3630, 0xfffffff0, 0x00000100,
649 };
650 
651 static const u32 verde_mgcg_cgcg_init[] =
652 {
653 	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc,
654 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
655 	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
656 	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
657 	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
658 	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
659 	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
660 	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
661 	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
662 	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
663 	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
664 	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
665 	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
666 	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
667 	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
668 	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
669 	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
670 	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
671 	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
672 	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
673 	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
674 	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
675 	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
676 	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
677 	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
678 	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
679 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
680 	0x2458, 0xffffffff, 0x00010000,
681 	0x2459, 0xffffffff, 0x00030002,
682 	0x245a, 0xffffffff, 0x00040007,
683 	0x245b, 0xffffffff, 0x00060005,
684 	0x245c, 0xffffffff, 0x00090008,
685 	0x245d, 0xffffffff, 0x00020001,
686 	0x245e, 0xffffffff, 0x00040003,
687 	0x245f, 0xffffffff, 0x00000007,
688 	0x2460, 0xffffffff, 0x00060005,
689 	0x2461, 0xffffffff, 0x00090008,
690 	0x2462, 0xffffffff, 0x00030002,
691 	0x2463, 0xffffffff, 0x00050004,
692 	0x2464, 0xffffffff, 0x00000008,
693 	0x2465, 0xffffffff, 0x00070006,
694 	0x2466, 0xffffffff, 0x000a0009,
695 	0x2467, 0xffffffff, 0x00040003,
696 	0x2468, 0xffffffff, 0x00060005,
697 	0x2469, 0xffffffff, 0x00000009,
698 	0x246a, 0xffffffff, 0x00080007,
699 	0x246b, 0xffffffff, 0x000b000a,
700 	0x246c, 0xffffffff, 0x00050004,
701 	0x246d, 0xffffffff, 0x00070006,
702 	0x246e, 0xffffffff, 0x0008000b,
703 	0x246f, 0xffffffff, 0x000a0009,
704 	0x2470, 0xffffffff, 0x000d000c,
705 	0x2480, 0xffffffff, 0x00090008,
706 	0x2481, 0xffffffff, 0x000b000a,
707 	0x2482, 0xffffffff, 0x000c000f,
708 	0x2483, 0xffffffff, 0x000e000d,
709 	0x2484, 0xffffffff, 0x00110010,
710 	0x2485, 0xffffffff, 0x000a0009,
711 	0x2486, 0xffffffff, 0x000c000b,
712 	0x2487, 0xffffffff, 0x0000000f,
713 	0x2488, 0xffffffff, 0x000e000d,
714 	0x2489, 0xffffffff, 0x00110010,
715 	0x248a, 0xffffffff, 0x000b000a,
716 	0x248b, 0xffffffff, 0x000d000c,
717 	0x248c, 0xffffffff, 0x00000010,
718 	0x248d, 0xffffffff, 0x000f000e,
719 	0x248e, 0xffffffff, 0x00120011,
720 	0x248f, 0xffffffff, 0x000c000b,
721 	0x2490, 0xffffffff, 0x000e000d,
722 	0x2491, 0xffffffff, 0x00000011,
723 	0x2492, 0xffffffff, 0x0010000f,
724 	0x2493, 0xffffffff, 0x00130012,
725 	0x2494, 0xffffffff, 0x000d000c,
726 	0x2495, 0xffffffff, 0x000f000e,
727 	0x2496, 0xffffffff, 0x00100013,
728 	0x2497, 0xffffffff, 0x00120011,
729 	0x2498, 0xffffffff, 0x00150014,
730 	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
731 	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
732 	mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080,
733 	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
734 	0x000c, 0xffffffff, 0x0000001c,
735 	0x000d, 0x000f0000, 0x000f0000,
736 	0x0583, 0xffffffff, 0x00000100,
737 	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
738 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
739 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
740 	mmMC_CITF_MISC_WR_CG, 0x000c0000, 0x000c0000,
741 	mmMC_CITF_MISC_RD_CG, 0x000c0000, 0x000c0000,
742 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
743 	0x157a, 0x00000001, 0x00000001,
744 	mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001,
745 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
746 	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
747 	0x3430, 0xfffffff0, 0x00000100,
748 	0x3630, 0xfffffff0, 0x00000100,
749 };
750 
751 static const u32 oland_mgcg_cgcg_init[] =
752 {
753 	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc,
754 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
755 	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
756 	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
757 	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
758 	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
759 	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
760 	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
761 	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
762 	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
763 	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
764 	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
765 	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
766 	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
767 	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
768 	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
769 	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
770 	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
771 	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
772 	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
773 	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
774 	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
775 	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
776 	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
777 	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
778 	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
779 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
780 	0x2458, 0xffffffff, 0x00010000,
781 	0x2459, 0xffffffff, 0x00030002,
782 	0x245a, 0xffffffff, 0x00040007,
783 	0x245b, 0xffffffff, 0x00060005,
784 	0x245c, 0xffffffff, 0x00090008,
785 	0x245d, 0xffffffff, 0x00020001,
786 	0x245e, 0xffffffff, 0x00040003,
787 	0x245f, 0xffffffff, 0x00000007,
788 	0x2460, 0xffffffff, 0x00060005,
789 	0x2461, 0xffffffff, 0x00090008,
790 	0x2462, 0xffffffff, 0x00030002,
791 	0x2463, 0xffffffff, 0x00050004,
792 	0x2464, 0xffffffff, 0x00000008,
793 	0x2465, 0xffffffff, 0x00070006,
794 	0x2466, 0xffffffff, 0x000a0009,
795 	0x2467, 0xffffffff, 0x00040003,
796 	0x2468, 0xffffffff, 0x00060005,
797 	0x2469, 0xffffffff, 0x00000009,
798 	0x246a, 0xffffffff, 0x00080007,
799 	0x246b, 0xffffffff, 0x000b000a,
800 	0x246c, 0xffffffff, 0x00050004,
801 	0x246d, 0xffffffff, 0x00070006,
802 	0x246e, 0xffffffff, 0x0008000b,
803 	0x246f, 0xffffffff, 0x000a0009,
804 	0x2470, 0xffffffff, 0x000d000c,
805 	0x2471, 0xffffffff, 0x00060005,
806 	0x2472, 0xffffffff, 0x00080007,
807 	0x2473, 0xffffffff, 0x0000000b,
808 	0x2474, 0xffffffff, 0x000a0009,
809 	0x2475, 0xffffffff, 0x000d000c,
810 	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
811 	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
812 	mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080,
813 	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
814 	0x000c, 0xffffffff, 0x0000001c,
815 	0x000d, 0x000f0000, 0x000f0000,
816 	0x0583, 0xffffffff, 0x00000100,
817 	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
818 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
819 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
820 	mmMC_CITF_MISC_WR_CG, 0x000c0000, 0x000c0000,
821 	mmMC_CITF_MISC_RD_CG, 0x000c0000, 0x000c0000,
822 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
823 	0x157a, 0x00000001, 0x00000001,
824 	mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001,
825 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
826 	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
827 	0x3430, 0xfffffff0, 0x00000100,
828 	0x3630, 0xfffffff0, 0x00000100,
829 };
830 
831 static const u32 hainan_mgcg_cgcg_init[] =
832 {
833 	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc,
834 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
835 	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
836 	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
837 	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
838 	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
839 	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
840 	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
841 	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
842 	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
843 	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
844 	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
845 	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
846 	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
847 	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
848 	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
849 	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
850 	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
851 	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
852 	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
853 	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
854 	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
855 	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
856 	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
857 	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
858 	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
859 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
860 	0x2458, 0xffffffff, 0x00010000,
861 	0x2459, 0xffffffff, 0x00030002,
862 	0x245a, 0xffffffff, 0x00040007,
863 	0x245b, 0xffffffff, 0x00060005,
864 	0x245c, 0xffffffff, 0x00090008,
865 	0x245d, 0xffffffff, 0x00020001,
866 	0x245e, 0xffffffff, 0x00040003,
867 	0x245f, 0xffffffff, 0x00000007,
868 	0x2460, 0xffffffff, 0x00060005,
869 	0x2461, 0xffffffff, 0x00090008,
870 	0x2462, 0xffffffff, 0x00030002,
871 	0x2463, 0xffffffff, 0x00050004,
872 	0x2464, 0xffffffff, 0x00000008,
873 	0x2465, 0xffffffff, 0x00070006,
874 	0x2466, 0xffffffff, 0x000a0009,
875 	0x2467, 0xffffffff, 0x00040003,
876 	0x2468, 0xffffffff, 0x00060005,
877 	0x2469, 0xffffffff, 0x00000009,
878 	0x246a, 0xffffffff, 0x00080007,
879 	0x246b, 0xffffffff, 0x000b000a,
880 	0x246c, 0xffffffff, 0x00050004,
881 	0x246d, 0xffffffff, 0x00070006,
882 	0x246e, 0xffffffff, 0x0008000b,
883 	0x246f, 0xffffffff, 0x000a0009,
884 	0x2470, 0xffffffff, 0x000d000c,
885 	0x2471, 0xffffffff, 0x00060005,
886 	0x2472, 0xffffffff, 0x00080007,
887 	0x2473, 0xffffffff, 0x0000000b,
888 	0x2474, 0xffffffff, 0x000a0009,
889 	0x2475, 0xffffffff, 0x000d000c,
890 	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
891 	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
892 	mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080,
893 	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
894 	0x000c, 0xffffffff, 0x0000001c,
895 	0x000d, 0x000f0000, 0x000f0000,
896 	0x0583, 0xffffffff, 0x00000100,
897 	0x0409, 0xffffffff, 0x00000100,
898 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
899 	mmMC_CITF_MISC_WR_CG, 0x000c0000, 0x000c0000,
900 	mmMC_CITF_MISC_RD_CG, 0x000c0000, 0x000c0000,
901 	mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001,
902 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
903 	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
904 	0x3430, 0xfffffff0, 0x00000100,
905 	0x3630, 0xfffffff0, 0x00000100,
906 };
907 
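/*
 * Indirect register access helpers: select the target offset through an
 * INDEX register, then access the paired DATA register, with a spinlock
 * serializing each index/data sequence.
 */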
908 static u32 si_pcie_rreg(struct amdgpu_device *adev, u32 reg)
909 {
910 	unsigned long flags;
911 	u32 r;
912 
913 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
914 	WREG32(AMDGPU_PCIE_INDEX, reg);
915 	(void)RREG32(AMDGPU_PCIE_INDEX);
916 	r = RREG32(AMDGPU_PCIE_DATA);
917 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
918 	return r;
919 }
920 
921 static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
922 {
923 	unsigned long flags;
924 
925 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
926 	WREG32(AMDGPU_PCIE_INDEX, reg);
927 	(void)RREG32(AMDGPU_PCIE_INDEX);
928 	WREG32(AMDGPU_PCIE_DATA, v);
929 	(void)RREG32(AMDGPU_PCIE_DATA);
930 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
931 }
932 
933 static u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
934 {
935 	unsigned long flags;
936 	u32 r;
937 
938 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
939 	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
940 	(void)RREG32(PCIE_PORT_INDEX);
941 	r = RREG32(PCIE_PORT_DATA);
942 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
943 	return r;
944 }
945 
946 static void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
947 {
948 	unsigned long flags;
949 
950 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
951 	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
952 	(void)RREG32(PCIE_PORT_INDEX);
953 	WREG32(PCIE_PORT_DATA, (v));
954 	(void)RREG32(PCIE_PORT_DATA);
955 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
956 }
957 
958 static u32 si_smc_rreg(struct amdgpu_device *adev, u32 reg)
959 {
960 	unsigned long flags;
961 	u32 r;
962 
963 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
964 	WREG32(SMC_IND_INDEX_0, (reg));
965 	r = RREG32(SMC_IND_DATA_0);
966 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
967 	return r;
968 }
969 
970 static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
971 {
972 	unsigned long flags;
973 
974 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
975 	WREG32(SMC_IND_INDEX_0, (reg));
976 	WREG32(SMC_IND_DATA_0, (v));
977 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
978 }
979 
980 static u32 si_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
981 {
982 	unsigned long flags;
983 	u32 r;
984 
985 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
986 	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
987 	r = RREG32(mmUVD_CTX_DATA);
988 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
989 	return r;
990 }
991 
992 static void si_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
993 {
994 	unsigned long flags;
995 
996 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
997 	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
998 	WREG32(mmUVD_CTX_DATA, (v));
999 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
1000 }
1001 
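/*
 * Registers that may be read through the read_register ASIC callback;
 * entries flagged true are fetched per-SE/SH via GRBM indexing.
 */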
1002 static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
1003 	{GRBM_STATUS},
1004 	{mmGRBM_STATUS2},
1005 	{mmGRBM_STATUS_SE0},
1006 	{mmGRBM_STATUS_SE1},
1007 	{mmSRBM_STATUS},
1008 	{mmSRBM_STATUS2},
1009 	{DMA_STATUS_REG + DMA0_REGISTER_OFFSET},
1010 	{DMA_STATUS_REG + DMA1_REGISTER_OFFSET},
1011 	{mmCP_STAT},
1012 	{mmCP_STALLED_STAT1},
1013 	{mmCP_STALLED_STAT2},
1014 	{mmCP_STALLED_STAT3},
1015 	{GB_ADDR_CONFIG},
1016 	{MC_ARB_RAMCFG},
1017 	{GB_TILE_MODE0},
1018 	{GB_TILE_MODE1},
1019 	{GB_TILE_MODE2},
1020 	{GB_TILE_MODE3},
1021 	{GB_TILE_MODE4},
1022 	{GB_TILE_MODE5},
1023 	{GB_TILE_MODE6},
1024 	{GB_TILE_MODE7},
1025 	{GB_TILE_MODE8},
1026 	{GB_TILE_MODE9},
1027 	{GB_TILE_MODE10},
1028 	{GB_TILE_MODE11},
1029 	{GB_TILE_MODE12},
1030 	{GB_TILE_MODE13},
1031 	{GB_TILE_MODE14},
1032 	{GB_TILE_MODE15},
1033 	{GB_TILE_MODE16},
1034 	{GB_TILE_MODE17},
1035 	{GB_TILE_MODE18},
1036 	{GB_TILE_MODE19},
1037 	{GB_TILE_MODE20},
1038 	{GB_TILE_MODE21},
1039 	{GB_TILE_MODE22},
1040 	{GB_TILE_MODE23},
1041 	{GB_TILE_MODE24},
1042 	{GB_TILE_MODE25},
1043 	{GB_TILE_MODE26},
1044 	{GB_TILE_MODE27},
1045 	{GB_TILE_MODE28},
1046 	{GB_TILE_MODE29},
1047 	{GB_TILE_MODE30},
1048 	{GB_TILE_MODE31},
1049 	{CC_RB_BACKEND_DISABLE, true},
1050 	{GC_USER_RB_BACKEND_DISABLE, true},
1051 	{PA_SC_RASTER_CONFIG, true},
1052 };
1053 
1054 static uint32_t si_get_register_value(struct amdgpu_device *adev,
1055 				      bool indexed, u32 se_num,
1056 				      u32 sh_num, u32 reg_offset)
1057 {
1058 	if (indexed) {
1059 		uint32_t val;
1060 		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
1061 		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
1062 
1063 		switch (reg_offset) {
1064 		case mmCC_RB_BACKEND_DISABLE:
1065 			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
1066 		case mmGC_USER_RB_BACKEND_DISABLE:
1067 			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
1068 		case mmPA_SC_RASTER_CONFIG:
1069 			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
1070 		}
1071 
1072 		mutex_lock(&adev->grbm_idx_mutex);
1073 		if (se_num != 0xffffffff || sh_num != 0xffffffff)
1074 			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
1075 
1076 		val = RREG32(reg_offset);
1077 
1078 		if (se_num != 0xffffffff || sh_num != 0xffffffff)
1079 			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1080 		mutex_unlock(&adev->grbm_idx_mutex);
1081 		return val;
1082 	} else {
1083 		unsigned idx;
1084 
1085 		switch (reg_offset) {
1086 		case mmGB_ADDR_CONFIG:
1087 			return adev->gfx.config.gb_addr_config;
1088 		case mmMC_ARB_RAMCFG:
1089 			return adev->gfx.config.mc_arb_ramcfg;
1090 		case mmGB_TILE_MODE0:
1091 		case mmGB_TILE_MODE1:
1092 		case mmGB_TILE_MODE2:
1093 		case mmGB_TILE_MODE3:
1094 		case mmGB_TILE_MODE4:
1095 		case mmGB_TILE_MODE5:
1096 		case mmGB_TILE_MODE6:
1097 		case mmGB_TILE_MODE7:
1098 		case mmGB_TILE_MODE8:
1099 		case mmGB_TILE_MODE9:
1100 		case mmGB_TILE_MODE10:
1101 		case mmGB_TILE_MODE11:
1102 		case mmGB_TILE_MODE12:
1103 		case mmGB_TILE_MODE13:
1104 		case mmGB_TILE_MODE14:
1105 		case mmGB_TILE_MODE15:
1106 		case mmGB_TILE_MODE16:
1107 		case mmGB_TILE_MODE17:
1108 		case mmGB_TILE_MODE18:
1109 		case mmGB_TILE_MODE19:
1110 		case mmGB_TILE_MODE20:
1111 		case mmGB_TILE_MODE21:
1112 		case mmGB_TILE_MODE22:
1113 		case mmGB_TILE_MODE23:
1114 		case mmGB_TILE_MODE24:
1115 		case mmGB_TILE_MODE25:
1116 		case mmGB_TILE_MODE26:
1117 		case mmGB_TILE_MODE27:
1118 		case mmGB_TILE_MODE28:
1119 		case mmGB_TILE_MODE29:
1120 		case mmGB_TILE_MODE30:
1121 		case mmGB_TILE_MODE31:
1122 			idx = (reg_offset - mmGB_TILE_MODE0);
1123 			return adev->gfx.config.tile_mode_array[idx];
1124 		default:
1125 			return RREG32(reg_offset);
1126 		}
1127 	}
1128 }
1129 static int si_read_register(struct amdgpu_device *adev, u32 se_num,
1130 			     u32 sh_num, u32 reg_offset, u32 *value)
1131 {
1132 	uint32_t i;
1133 
1134 	*value = 0;
1135 	for (i = 0; i < ARRAY_SIZE(si_allowed_read_registers); i++) {
1136 		bool indexed = si_allowed_read_registers[i].grbm_indexed;
1137 
1138 		if (reg_offset != si_allowed_read_registers[i].reg_offset)
1139 			continue;
1140 
1141 		*value = si_get_register_value(adev, indexed, se_num, sh_num,
1142 					       reg_offset);
1143 		return 0;
1144 	}
1145 	return -EINVAL;
1146 }
1147 
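/*
 * Read the VBIOS with the ROM enabled and VGA decode temporarily
 * disabled, then restore the original register state.
 */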
1148 static bool si_read_disabled_bios(struct amdgpu_device *adev)
1149 {
1150 	u32 bus_cntl;
1151 	u32 d1vga_control = 0;
1152 	u32 d2vga_control = 0;
1153 	u32 vga_render_control = 0;
1154 	u32 rom_cntl;
1155 	bool r;
1156 
1157 	bus_cntl = RREG32(R600_BUS_CNTL);
1158 	if (adev->mode_info.num_crtc) {
1159 		d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
1160 		d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
1161 		vga_render_control = RREG32(VGA_RENDER_CONTROL);
1162 	}
1163 	rom_cntl = RREG32(R600_ROM_CNTL);
1164 
1165 	/* enable the rom */
1166 	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
1167 	if (adev->mode_info.num_crtc) {
1168 		/* Disable VGA mode */
1169 		WREG32(AVIVO_D1VGA_CONTROL,
1170 		       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
1171 					  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
1172 		WREG32(AVIVO_D2VGA_CONTROL,
1173 		       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
1174 					  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
1175 		WREG32(VGA_RENDER_CONTROL,
1176 		       (vga_render_control & C_000300_VGA_VSTATUS_CNTL));
1177 	}
1178 	WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
1179 
1180 	r = amdgpu_read_bios(adev);
1181 
1182 	/* restore regs */
1183 	WREG32(R600_BUS_CNTL, bus_cntl);
1184 	if (adev->mode_info.num_crtc) {
1185 		WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
1186 		WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
1187 		WREG32(VGA_RENDER_CONTROL, vga_render_control);
1188 	}
1189 	WREG32(R600_ROM_CNTL, rom_cntl);
1190 	return r;
1191 }
1192 
1193 #define mmROM_INDEX 0x2A
1194 #define mmROM_DATA  0x2B
1195 
1196 static bool si_read_bios_from_rom(struct amdgpu_device *adev,
1197 				  u8 *bios, u32 length_bytes)
1198 {
1199 	u32 *dw_ptr;
1200 	u32 i, length_dw;
1201 
1202 	if (bios == NULL)
1203 		return false;
1204 	if (length_bytes == 0)
1205 		return false;
1206 	/* APU vbios image is part of sbios image */
1207 	if (adev->flags & AMD_IS_APU)
1208 		return false;
1209 
1210 	dw_ptr = (u32 *)bios;
1211 	length_dw = ALIGN(length_bytes, 4) / 4;
1212 	/* set rom index to 0 */
1213 	WREG32(mmROM_INDEX, 0);
1214 	for (i = 0; i < length_dw; i++)
1215 		dw_ptr[i] = RREG32(mmROM_DATA);
1216 
1217 	return true;
1218 }
1219 
1220 static void si_set_clk_bypass_mode(struct amdgpu_device *adev)
1221 {
1222 	u32 tmp, i;
1223 
1224 	tmp = RREG32(CG_SPLL_FUNC_CNTL);
1225 	tmp |= SPLL_BYPASS_EN;
1226 	WREG32(CG_SPLL_FUNC_CNTL, tmp);
1227 
1228 	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
1229 	tmp |= SPLL_CTLREQ_CHG;
1230 	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
1231 
1232 	for (i = 0; i < adev->usec_timeout; i++) {
1233 		if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
1234 			break;
1235 		udelay(1);
1236 	}
1237 
1238 	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
1239 	tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
1240 	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
1241 
1242 	tmp = RREG32(MPLL_CNTL_MODE);
1243 	tmp &= ~MPLL_MCLK_SEL;
1244 	WREG32(MPLL_CNTL_MODE, tmp);
1245 }
1246 
1247 static void si_spll_powerdown(struct amdgpu_device *adev)
1248 {
1249 	u32 tmp;
1250 
1251 	tmp = RREG32(SPLL_CNTL_MODE);
1252 	tmp |= SPLL_SW_DIR_CONTROL;
1253 	WREG32(SPLL_CNTL_MODE, tmp);
1254 
1255 	tmp = RREG32(CG_SPLL_FUNC_CNTL);
1256 	tmp |= SPLL_RESET;
1257 	WREG32(CG_SPLL_FUNC_CNTL, tmp);
1258 
1259 	tmp = RREG32(CG_SPLL_FUNC_CNTL);
1260 	tmp |= SPLL_SLEEP;
1261 	WREG32(CG_SPLL_FUNC_CNTL, tmp);
1262 
1263 	tmp = RREG32(SPLL_CNTL_MODE);
1264 	tmp &= ~SPLL_SW_DIR_CONTROL;
1265 	WREG32(SPLL_CNTL_MODE, tmp);
1266 }
1267 
1268 static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
1269 {
1270 	u32 i;
1271 	int r = -EINVAL;
1272 
1273 	dev_info(adev->dev, "GPU pci config reset\n");
1274 
1275 	/* set mclk/sclk to bypass */
1276 	si_set_clk_bypass_mode(adev);
1277 	/* powerdown spll */
1278 	si_spll_powerdown(adev);
1279 	/* disable BM */
1280 	pci_clear_master(adev->pdev);
1281 	/* reset */
1282 	amdgpu_device_pci_config_reset(adev);
1283 
1284 	udelay(100);
1285 
1286 	/* wait for asic to come out of reset */
1287 	for (i = 0; i < adev->usec_timeout; i++) {
1288 		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
1289 			/* enable BM */
1290 			pci_set_master(adev->pdev);
1291 			adev->has_hw_reset = true;
1292 			r = 0;
1293 			break;
1294 		}
1295 		udelay(1);
1296 	}
1297 
1298 	return r;
1299 }
1300 
1301 static int si_asic_reset(struct amdgpu_device *adev)
1302 {
1303 	int r;
1304 
1305 	dev_info(adev->dev, "PCI CONFIG reset\n");
1306 
1307 	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
1308 
1309 	r = si_gpu_pci_config_reset(adev);
1310 
1311 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
1312 
1313 	return r;
1314 }
1315 
1316 static bool si_asic_supports_baco(struct amdgpu_device *adev)
1317 {
1318 	return false;
1319 }
1320 
1321 static enum amd_reset_method
1322 si_asic_reset_method(struct amdgpu_device *adev)
1323 {
1324 	if (amdgpu_reset_method != AMD_RESET_METHOD_LEGACY &&
1325 	    amdgpu_reset_method != -1)
1326 		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
1327 				  amdgpu_reset_method);
1328 
1329 	return AMD_RESET_METHOD_LEGACY;
1330 }
1331 
1332 static u32 si_get_config_memsize(struct amdgpu_device *adev)
1333 {
1334 	return RREG32(mmCONFIG_MEMSIZE);
1335 }
1336 
1337 static void si_vga_set_state(struct amdgpu_device *adev, bool state)
1338 {
1339 	uint32_t temp;
1340 
1341 	temp = RREG32(CONFIG_CNTL);
1342 	if (!state) {
1343 		temp &= ~(1<<0);
1344 		temp |= (1<<1);
1345 	} else {
1346 		temp &= ~(1<<1);
1347 	}
1348 	WREG32(CONFIG_CNTL, temp);
1349 }
1350 
1351 static u32 si_get_xclk(struct amdgpu_device *adev)
1352 {
1353 	u32 reference_clock = adev->clock.spll.reference_freq;
1354 	u32 tmp;
1355 
1356 	tmp = RREG32(CG_CLKPIN_CNTL_2);
1357 	if (tmp & MUX_TCLK_TO_XCLK)
1358 		return TCLK;
1359 
1360 	tmp = RREG32(CG_CLKPIN_CNTL);
1361 	if (tmp & XTALIN_DIVIDE)
1362 		return reference_clock / 4;
1363 
1364 	return reference_clock;
1365 }
1366 
1367 static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
1368 {
1369 	if (!ring || !ring->funcs->emit_wreg) {
1370 		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
1371 		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
1372 	} else {
1373 		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
1374 	}
1375 }
1376 
1377 static void si_invalidate_hdp(struct amdgpu_device *adev,
1378 			      struct amdgpu_ring *ring)
1379 {
1380 	if (!ring || !ring->funcs->emit_wreg) {
1381 		WREG32(mmHDP_DEBUG0, 1);
1382 		RREG32(mmHDP_DEBUG0);
1383 	} else {
1384 		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
1385 	}
1386 }
1387 
1388 static bool si_need_full_reset(struct amdgpu_device *adev)
1389 {
1390 	/* change this when we support soft reset */
1391 	return true;
1392 }
1393 
1394 static bool si_need_reset_on_init(struct amdgpu_device *adev)
1395 {
1396 	return false;
1397 }
1398 
1399 static int si_get_pcie_lanes(struct amdgpu_device *adev)
1400 {
1401 	u32 link_width_cntl;
1402 
1403 	if (adev->flags & AMD_IS_APU)
1404 		return 0;
1405 
1406 	link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
1407 
1408 	switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
1409 	case LC_LINK_WIDTH_X1:
1410 		return 1;
1411 	case LC_LINK_WIDTH_X2:
1412 		return 2;
1413 	case LC_LINK_WIDTH_X4:
1414 		return 4;
1415 	case LC_LINK_WIDTH_X8:
1416 		return 8;
1417 	case LC_LINK_WIDTH_X0:
1418 	case LC_LINK_WIDTH_X16:
1419 	default:
1420 		return 16;
1421 	}
1422 }
1423 
1424 static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
1425 {
1426 	u32 link_width_cntl, mask;
1427 
1428 	if (adev->flags & AMD_IS_APU)
1429 		return;
1430 
1431 	switch (lanes) {
1432 	case 0:
1433 		mask = LC_LINK_WIDTH_X0;
1434 		break;
1435 	case 1:
1436 		mask = LC_LINK_WIDTH_X1;
1437 		break;
1438 	case 2:
1439 		mask = LC_LINK_WIDTH_X2;
1440 		break;
1441 	case 4:
1442 		mask = LC_LINK_WIDTH_X4;
1443 		break;
1444 	case 8:
1445 		mask = LC_LINK_WIDTH_X8;
1446 		break;
1447 	case 16:
1448 		mask = LC_LINK_WIDTH_X16;
1449 		break;
1450 	default:
1451 		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
1452 		return;
1453 	}
1454 
1455 	link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
1456 	link_width_cntl &= ~LC_LINK_WIDTH_MASK;
1457 	link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT;
1458 	link_width_cntl |= (LC_RECONFIG_NOW |
1459 			    LC_RECONFIG_ARC_MISSING_ESCAPE);
1460 
1461 	WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1462 }
1463 
1464 static void si_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
1465 			      uint64_t *count1)
1466 {
1467 	uint32_t perfctr = 0;
1468 	uint64_t cnt0_of, cnt1_of;
1469 	int tmp;
1470 
1471 	/* This reports 0 on APUs, so return to avoid writing/reading registers
1472 	 * that may or may not be different from their GPU counterparts
1473 	 */
1474 	if (adev->flags & AMD_IS_APU)
1475 		return;
1476 
1477 	/* Set the 2 events that we wish to watch, defined above */
1478 	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
1479 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
1480 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
1481 
1482 	/* Write to enable desired perf counters */
1483 	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
1484 	/* Zero out and enable the perf counters
1485 	 * Write 0x5:
1486 	 * Bit 0 = Start all counters(1)
1487 	 * Bit 2 = Global counter reset enable(1)
1488 	 */
1489 	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
1490 
1491 	msleep(1000);
1492 
1493 	/* Load the shadow and disable the perf counters
1494 	 * Write 0x2:
1495 	 * Bit 0 = Stop counters(0)
1496 	 * Bit 1 = Load the shadow counters(1)
1497 	 */
1498 	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
1499 
1500 	/* Read register values to get any >32bit overflow */
1501 	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
1502 	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
1503 	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
1504 
1505 	/* Get the values and add the overflow */
1506 	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
1507 	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
1508 }
1509 
1510 static uint64_t si_get_pcie_replay_count(struct amdgpu_device *adev)
1511 {
1512 	uint64_t nak_r, nak_g;
1513 
1514 	/* Get the number of NAKs received and generated */
1515 	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
1516 	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
1517 
1518 	/* Add the total number of NAKs, i.e. the number of replays */
1519 	return (nak_r + nak_g);
1520 }
1521 
1522 static int si_uvd_send_upll_ctlreq(struct amdgpu_device *adev,
1523 				   unsigned cg_upll_func_cntl)
1524 {
1525 	unsigned i;
1526 
1527 	/* Make sure UPLL_CTLREQ is deasserted */
1528 	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
1529 
1530 	mdelay(10);
1531 
1532 	/* Assert UPLL_CTLREQ */
1533 	WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
1534 
1535 	/* Wait for CTLACK and CTLACK2 to get asserted */
1536 	for (i = 0; i < SI_MAX_CTLACKS_ASSERTION_WAIT; ++i) {
1537 		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
1538 
1539 		if ((RREG32(cg_upll_func_cntl) & mask) == mask)
1540 			break;
1541 		mdelay(10);
1542 	}
1543 
1544 	/* Deassert UPLL_CTLREQ */
1545 	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
1546 
1547 	if (i == SI_MAX_CTLACKS_ASSERTION_WAIT) {
1548 		DRM_ERROR("Timeout setting UVD clocks!\n");
1549 		return -ETIMEDOUT;
1550 	}
1551 
1552 	return 0;
1553 }
1554 
1555 static unsigned si_uvd_calc_upll_post_div(unsigned vco_freq,
1556 					  unsigned target_freq,
1557 					  unsigned pd_min,
1558 					  unsigned pd_even)
1559 {
1560 	unsigned post_div = vco_freq / target_freq;
1561 
1562 	/* Adjust to post divider minimum value */
1563 	if (post_div < pd_min)
1564 		post_div = pd_min;
1565 
1566 	/* We always need a frequency less than or equal to the target */
1567 	if ((vco_freq / post_div) > target_freq)
1568 		post_div += 1;
1569 
1570 	/* Post dividers above a certain value must be even */
1571 	if (post_div > pd_even && post_div % 2)
1572 		post_div += 1;
1573 
1574 	return post_div;
1575 }
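
/*
 * Worked example for si_uvd_calc_upll_post_div() (hypothetical numbers):
 * with vco_freq = 160000, target_freq = 54000, pd_min = 0 and pd_even = 5,
 * the initial post_div is 160000 / 54000 = 2; since 160000 / 2 = 80000
 * exceeds the target it is bumped to 3, giving 160000 / 3 = 53333, the
 * closest frequency not above the target.  Had the result been an odd
 * value above pd_even, it would have been rounded up to the next even
 * divider.
 */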
1576 
1577 /**
1578  * si_calc_upll_dividers - calc UPLL clock dividers
1579  *
1580  * @adev: amdgpu_device pointer
1581  * @vclk: wanted VCLK
1582  * @dclk: wanted DCLK
1583  * @vco_min: minimum VCO frequency
1584  * @vco_max: maximum VCO frequency
1585  * @fb_factor: factor to multiply vco freq with
1586  * @fb_mask: limit and bitmask for feedback divider
1587  * @pd_min: post divider minimum
1588  * @pd_max: post divider maximum
1589  * @pd_even: post divider must be even above this value
1590  * @optimal_fb_div: resulting feedback divider
1591  * @optimal_vclk_div: resulting vclk post divider
1592  * @optimal_dclk_div: resulting dclk post divider
1593  *
1594  * Calculate dividers for the UVD's UPLL (except APUs).
1595  * Returns zero on success; -EINVAL on error.
1596  */
1597 static int si_calc_upll_dividers(struct amdgpu_device *adev,
1598 				 unsigned vclk, unsigned dclk,
1599 				 unsigned vco_min, unsigned vco_max,
1600 				 unsigned fb_factor, unsigned fb_mask,
1601 				 unsigned pd_min, unsigned pd_max,
1602 				 unsigned pd_even,
1603 				 unsigned *optimal_fb_div,
1604 				 unsigned *optimal_vclk_div,
1605 				 unsigned *optimal_dclk_div)
1606 {
1607 	unsigned vco_freq, ref_freq = adev->clock.spll.reference_freq;
1608 
1609 	/* Start off with something large */
1610 	unsigned optimal_score = ~0;
1611 
1612 	/* Loop through vco from low to high */
1613 	vco_min = max(max(vco_min, vclk), dclk);
1614 	for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {
1615 		uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
1616 		unsigned vclk_div, dclk_div, score;
1617 
1618 		do_div(fb_div, ref_freq);
1619 
1620 		/* fb div out of range? */
1621 		if (fb_div > fb_mask)
1622 			break; /* It can only get worse */
1623 
1624 		fb_div &= fb_mask;
1625 
1626 		/* Calc vclk divider with current vco freq */
1627 		vclk_div = si_uvd_calc_upll_post_div(vco_freq, vclk,
1628 						     pd_min, pd_even);
1629 		if (vclk_div > pd_max)
1630 			break; /* vco is too big, stop here */
1631 
1632 		/* Calc dclk divider with current vco freq */
1633 		dclk_div = si_uvd_calc_upll_post_div(vco_freq, dclk,
1634 						     pd_min, pd_even);
1635 		if (dclk_div > pd_max)
1636 			break; /* vco is too big, stop here */
1637 
1638 		/* Calc score with current vco freq */
1639 		score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);
1640 
1641 		/* Determine if this vco setting is better than current optimal settings */
1642 		if (score < optimal_score) {
1643 			*optimal_fb_div = fb_div;
1644 			*optimal_vclk_div = vclk_div;
1645 			*optimal_dclk_div = dclk_div;
1646 			optimal_score = score;
1647 			if (optimal_score == 0)
1648 				break; /* It can't get better than this */
1649 		}
1650 	}
1651 
1652 	/* Did we find a valid setup? */
1653 	if (optimal_score == ~0)
1654 		return -EINVAL;
1655 
1656 	return 0;
1657 }
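
/*
 * The score computed in si_calc_upll_dividers() above is the combined
 * shortfall of the two resulting clocks from their targets:
 * (vclk - vco/vclk_div) + (dclk - vco/dclk_div); a score of zero means
 * both requested frequencies are hit exactly.  The feedback divider is
 * derived as fb_div = vco_freq * fb_factor / ref_freq, i.e. the VCO is
 * expected to run at roughly ref_freq * fb_div / fb_factor once the
 * divider is programmed.
 */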
1658 
1659 static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
1660 {
1661 	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
1662 	int r;
1663 
1664 	/* Bypass vclk and dclk with bclk */
1665 	WREG32_P(CG_UPLL_FUNC_CNTL_2,
1666 		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
1667 		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
1668 
1669 	/* Put PLL in bypass mode */
1670 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
1671 
1672 	if (!vclk || !dclk) {
1673 		/* Keep the Bypass mode */
1674 		return 0;
1675 	}
1676 
1677 	r = si_calc_upll_dividers(adev, vclk, dclk, 125000, 250000,
1678 				  16384, 0x03FFFFFF, 0, 128, 5,
1679 				  &fb_div, &vclk_div, &dclk_div);
1680 	if (r)
1681 		return r;
1682 
1683 	/* Set RESET_ANTI_MUX to 0 */
1684 	WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
1685 
1686 	/* Set VCO_MODE to 1 */
1687 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
1688 
1689 	/* Disable sleep mode */
1690 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
1691 
1692 	/* Deassert UPLL_RESET */
1693 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
1694 
1695 	mdelay(1);
1696 
1697 	r = si_uvd_send_upll_ctlreq(adev, CG_UPLL_FUNC_CNTL);
1698 	if (r)
1699 		return r;
1700 
1701 	/* Assert UPLL_RESET again */
1702 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
1703 
1704 	/* Disable spread spectrum. */
1705 	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
1706 
1707 	/* Set feedback divider */
1708 	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
1709 
1710 	/* Set ref divider to 0 */
1711 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
1712 
1713 	if (fb_div < 307200)
1714 		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
1715 	else
1716 		WREG32_P(CG_UPLL_FUNC_CNTL_4,
1717 			 UPLL_SPARE_ISPARE9,
1718 			 ~UPLL_SPARE_ISPARE9);
1719 
1720 	/* Set PDIV_A and PDIV_B */
1721 	WREG32_P(CG_UPLL_FUNC_CNTL_2,
1722 		 UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
1723 		 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
1724 
1725 	/* Give the PLL some time to settle */
1726 	mdelay(15);
1727 
1728 	/* Deassert PLL_RESET */
1729 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
1730 
1731 	mdelay(15);
1732 
1733 	/* Switch from bypass mode to normal mode */
1734 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
1735 
1736 	r = si_uvd_send_upll_ctlreq(adev, CG_UPLL_FUNC_CNTL);
1737 	if (r)
1738 		return r;
1739 
1740 	/* Switch VCLK and DCLK selection */
1741 	WREG32_P(CG_UPLL_FUNC_CNTL_2,
1742 		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
1743 		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
1744 
1745 	mdelay(100);
1746 
1747 	return 0;
1748 }
1749 
1750 static int si_vce_send_vcepll_ctlreq(struct amdgpu_device *adev)
1751 {
1752 	unsigned i;
1753 
1754 	/* Make sure VCEPLL_CTLREQ is deasserted */
1755 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
1756 
1757 	mdelay(10);
1758 
1759 	/* Assert UPLL_CTLREQ */
1760 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
1761 
1762 	/* Wait for CTLACK and CTLACK2 to get asserted */
1763 	for (i = 0; i < SI_MAX_CTLACKS_ASSERTION_WAIT; ++i) {
1764 		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
1765 
1766 		if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
1767 			break;
1768 		mdelay(10);
1769 	}
1770 
1771 	/* Deassert UPLL_CTLREQ */
1772 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
1773 
1774 	if (i == SI_MAX_CTLACKS_ASSERTION_WAIT) {
1775 		DRM_ERROR("Timeout setting VCE clocks!\n");
1776 		return -ETIMEDOUT;
1777 	}
1778 
1779 	return 0;
1780 }
1781 
1782 static int si_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
1783 {
1784 	unsigned fb_div = 0, evclk_div = 0, ecclk_div = 0;
1785 	int r;
1786 
1787 	/* Bypass evclk and ecclk with bclk */
1788 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
1789 		     EVCLK_SRC_SEL(1) | ECCLK_SRC_SEL(1),
1790 		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
1791 
1792 	/* Put PLL in bypass mode */
1793 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_BYPASS_EN_MASK,
1794 		     ~VCEPLL_BYPASS_EN_MASK);
1795 
1796 	if (!evclk || !ecclk) {
1797 		/* Keep the Bypass mode, put PLL to sleep */
1798 		WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
1799 			     ~VCEPLL_SLEEP_MASK);
1800 		return 0;
1801 	}
1802 
1803 	r = si_calc_upll_dividers(adev, evclk, ecclk, 125000, 250000,
1804 				  16384, 0x03FFFFFF, 0, 128, 5,
1805 				  &fb_div, &evclk_div, &ecclk_div);
1806 	if (r)
1807 		return r;
1808 
1809 	/* Set RESET_ANTI_MUX to 0 */
1810 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
1811 
1812 	/* Set VCO_MODE to 1 */
1813 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_VCO_MODE_MASK,
1814 		     ~VCEPLL_VCO_MODE_MASK);
1815 
1816 	/* Toggle VCEPLL_SLEEP to 1 then back to 0 */
1817 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
1818 		     ~VCEPLL_SLEEP_MASK);
1819 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_SLEEP_MASK);
1820 
1821 	/* Deassert VCEPLL_RESET */
1822 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
1823 
1824 	mdelay(1);
1825 
1826 	r = si_vce_send_vcepll_ctlreq(adev);
1827 	if (r)
1828 		return r;
1829 
1830 	/* Assert VCEPLL_RESET again */
1831 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_RESET_MASK, ~VCEPLL_RESET_MASK);
1832 
1833 	/* Disable spread spectrum. */
1834 	WREG32_SMC_P(CG_VCEPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
1835 
1836 	/* Set feedback divider */
1837 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_3,
1838 		     VCEPLL_FB_DIV(fb_div),
1839 		     ~VCEPLL_FB_DIV_MASK);
1840 
1841 	/* Set ref divider to 0 */
1842 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_REF_DIV_MASK);
1843 
1844 	/* Set PDIV_A and PDIV_B */
1845 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
1846 		     VCEPLL_PDIV_A(evclk_div) | VCEPLL_PDIV_B(ecclk_div),
1847 		     ~(VCEPLL_PDIV_A_MASK | VCEPLL_PDIV_B_MASK));
1848 
1849 	/* Give the PLL some time to settle */
1850 	mdelay(15);
1851 
1852 	/* Deassert PLL_RESET */
1853 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
1854 
1855 	mdelay(15);
1856 
1857 	/* Switch from bypass mode to normal mode */
1858 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_BYPASS_EN_MASK);
1859 
1860 	r = si_vce_send_vcepll_ctlreq(adev);
1861 	if (r)
1862 		return r;
1863 
1864 	/* Switch EVCLK and ECCLK selection */
1865 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
1866 		     EVCLK_SRC_SEL(16) | ECCLK_SRC_SEL(16),
1867 		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
1868 
1869 	mdelay(100);
1870 
1871 	return 0;
1872 }
1873 
1874 static void si_pre_asic_init(struct amdgpu_device *adev)
1875 {
1876 }
1877 
1878 static const struct amdgpu_asic_funcs si_asic_funcs =
1879 {
1880 	.read_disabled_bios = &si_read_disabled_bios,
1881 	.read_bios_from_rom = &si_read_bios_from_rom,
1882 	.read_register = &si_read_register,
1883 	.reset = &si_asic_reset,
1884 	.reset_method = &si_asic_reset_method,
1885 	.set_vga_state = &si_vga_set_state,
1886 	.get_xclk = &si_get_xclk,
1887 	.set_uvd_clocks = &si_set_uvd_clocks,
1888 	.set_vce_clocks = &si_set_vce_clocks,
1889 	.get_pcie_lanes = &si_get_pcie_lanes,
1890 	.set_pcie_lanes = &si_set_pcie_lanes,
1891 	.get_config_memsize = &si_get_config_memsize,
1892 	.flush_hdp = &si_flush_hdp,
1893 	.invalidate_hdp = &si_invalidate_hdp,
1894 	.need_full_reset = &si_need_full_reset,
1895 	.get_pcie_usage = &si_get_pcie_usage,
1896 	.need_reset_on_init = &si_need_reset_on_init,
1897 	.get_pcie_replay_count = &si_get_pcie_replay_count,
1898 	.supports_baco = &si_asic_supports_baco,
1899 	.pre_asic_init = &si_pre_asic_init,
1900 };
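
/*
 * The callbacks above are not called directly; the rest of the driver
 * reaches them through the amdgpu_asic_*() wrapper macros (for example
 * amdgpu_asic_set_uvd_clocks()), which dereference the asic_funcs pointer
 * installed in si_common_early_init() below.
 */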
1901 
1902 static uint32_t si_get_rev_id(struct amdgpu_device *adev)
1903 {
1904 	return (RREG32(CC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
1905 		>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
1906 }
1907 
1908 static int si_common_early_init(void *handle)
1909 {
1910 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1911 
1912 	adev->smc_rreg = &si_smc_rreg;
1913 	adev->smc_wreg = &si_smc_wreg;
1914 	adev->pcie_rreg = &si_pcie_rreg;
1915 	adev->pcie_wreg = &si_pcie_wreg;
1916 	adev->pciep_rreg = &si_pciep_rreg;
1917 	adev->pciep_wreg = &si_pciep_wreg;
1918 	adev->uvd_ctx_rreg = si_uvd_ctx_rreg;
1919 	adev->uvd_ctx_wreg = si_uvd_ctx_wreg;
1920 	adev->didt_rreg = NULL;
1921 	adev->didt_wreg = NULL;
1922 
1923 	adev->asic_funcs = &si_asic_funcs;
1924 
1925 	adev->rev_id = si_get_rev_id(adev);
1926 	adev->external_rev_id = 0xFF;
1927 	switch (adev->asic_type) {
1928 	case CHIP_TAHITI:
1929 		adev->cg_flags =
1930 			AMD_CG_SUPPORT_GFX_MGCG |
1931 			AMD_CG_SUPPORT_GFX_MGLS |
1932 			/*AMD_CG_SUPPORT_GFX_CGCG |*/
1933 			AMD_CG_SUPPORT_GFX_CGLS |
1934 			AMD_CG_SUPPORT_GFX_CGTS |
1935 			AMD_CG_SUPPORT_GFX_CP_LS |
1936 			AMD_CG_SUPPORT_MC_MGCG |
1937 			AMD_CG_SUPPORT_SDMA_MGCG |
1938 			AMD_CG_SUPPORT_BIF_LS |
1939 			AMD_CG_SUPPORT_VCE_MGCG |
1940 			AMD_CG_SUPPORT_UVD_MGCG |
1941 			AMD_CG_SUPPORT_HDP_LS |
1942 			AMD_CG_SUPPORT_HDP_MGCG;
1943 		adev->pg_flags = 0;
1944 		adev->external_rev_id = (adev->rev_id == 0) ? 1 :
1945 					(adev->rev_id == 1) ? 5 : 6;
1946 		break;
1947 	case CHIP_PITCAIRN:
1948 		adev->cg_flags =
1949 			AMD_CG_SUPPORT_GFX_MGCG |
1950 			AMD_CG_SUPPORT_GFX_MGLS |
1951 			/*AMD_CG_SUPPORT_GFX_CGCG |*/
1952 			AMD_CG_SUPPORT_GFX_CGLS |
1953 			AMD_CG_SUPPORT_GFX_CGTS |
1954 			AMD_CG_SUPPORT_GFX_CP_LS |
1955 			AMD_CG_SUPPORT_GFX_RLC_LS |
1956 			AMD_CG_SUPPORT_MC_LS |
1957 			AMD_CG_SUPPORT_MC_MGCG |
1958 			AMD_CG_SUPPORT_SDMA_MGCG |
1959 			AMD_CG_SUPPORT_BIF_LS |
1960 			AMD_CG_SUPPORT_VCE_MGCG |
1961 			AMD_CG_SUPPORT_UVD_MGCG |
1962 			AMD_CG_SUPPORT_HDP_LS |
1963 			AMD_CG_SUPPORT_HDP_MGCG;
1964 		adev->pg_flags = 0;
1965 		adev->external_rev_id = adev->rev_id + 20;
1966 		break;
1967 
1968 	case CHIP_VERDE:
1969 		adev->cg_flags =
1970 			AMD_CG_SUPPORT_GFX_MGCG |
1971 			AMD_CG_SUPPORT_GFX_MGLS |
1972 			AMD_CG_SUPPORT_GFX_CGLS |
1973 			AMD_CG_SUPPORT_GFX_CGTS |
1974 			AMD_CG_SUPPORT_GFX_CGTS_LS |
1975 			AMD_CG_SUPPORT_GFX_CP_LS |
1976 			AMD_CG_SUPPORT_MC_LS |
1977 			AMD_CG_SUPPORT_MC_MGCG |
1978 			AMD_CG_SUPPORT_SDMA_MGCG |
1979 			AMD_CG_SUPPORT_SDMA_LS |
1980 			AMD_CG_SUPPORT_BIF_LS |
1981 			AMD_CG_SUPPORT_VCE_MGCG |
1982 			AMD_CG_SUPPORT_UVD_MGCG |
1983 			AMD_CG_SUPPORT_HDP_LS |
1984 			AMD_CG_SUPPORT_HDP_MGCG;
1985 		adev->pg_flags = 0;
1986 		/* XXX: external_rev_id offset for Verde is unverified */
1987 		adev->external_rev_id = adev->rev_id + 40;
1988 		break;
1989 	case CHIP_OLAND:
1990 		adev->cg_flags =
1991 			AMD_CG_SUPPORT_GFX_MGCG |
1992 			AMD_CG_SUPPORT_GFX_MGLS |
1993 			/*AMD_CG_SUPPORT_GFX_CGCG |*/
1994 			AMD_CG_SUPPORT_GFX_CGLS |
1995 			AMD_CG_SUPPORT_GFX_CGTS |
1996 			AMD_CG_SUPPORT_GFX_CP_LS |
1997 			AMD_CG_SUPPORT_GFX_RLC_LS |
1998 			AMD_CG_SUPPORT_MC_LS |
1999 			AMD_CG_SUPPORT_MC_MGCG |
2000 			AMD_CG_SUPPORT_SDMA_MGCG |
2001 			AMD_CG_SUPPORT_BIF_LS |
2002 			AMD_CG_SUPPORT_UVD_MGCG |
2003 			AMD_CG_SUPPORT_HDP_LS |
2004 			AMD_CG_SUPPORT_HDP_MGCG;
2005 		adev->pg_flags = 0;
2006 		adev->external_rev_id = 60;
2007 		break;
2008 	case CHIP_HAINAN:
2009 		adev->cg_flags =
2010 			AMD_CG_SUPPORT_GFX_MGCG |
2011 			AMD_CG_SUPPORT_GFX_MGLS |
2012 			/*AMD_CG_SUPPORT_GFX_CGCG |*/
2013 			AMD_CG_SUPPORT_GFX_CGLS |
2014 			AMD_CG_SUPPORT_GFX_CGTS |
2015 			AMD_CG_SUPPORT_GFX_CP_LS |
2016 			AMD_CG_SUPPORT_GFX_RLC_LS |
2017 			AMD_CG_SUPPORT_MC_LS |
2018 			AMD_CG_SUPPORT_MC_MGCG |
2019 			AMD_CG_SUPPORT_SDMA_MGCG |
2020 			AMD_CG_SUPPORT_BIF_LS |
2021 			AMD_CG_SUPPORT_HDP_LS |
2022 			AMD_CG_SUPPORT_HDP_MGCG;
2023 		adev->pg_flags = 0;
2024 		adev->external_rev_id = 70;
2025 		break;
2026 
2027 	default:
2028 		return -EINVAL;
2029 	}
2030 
2031 	return 0;
2032 }
2033 
2034 static int si_common_sw_init(void *handle)
2035 {
2036 	return 0;
2037 }
2038 
2039 static int si_common_sw_fini(void *handle)
2040 {
2041 	return 0;
2042 }
2043 
2044 
2045 static void si_init_golden_registers(struct amdgpu_device *adev)
2046 {
2047 	switch (adev->asic_type) {
2048 	case CHIP_TAHITI:
2049 		amdgpu_device_program_register_sequence(adev,
2050 							tahiti_golden_registers,
2051 							ARRAY_SIZE(tahiti_golden_registers));
2052 		amdgpu_device_program_register_sequence(adev,
2053 							tahiti_golden_rlc_registers,
2054 							ARRAY_SIZE(tahiti_golden_rlc_registers));
2055 		amdgpu_device_program_register_sequence(adev,
2056 							tahiti_mgcg_cgcg_init,
2057 							ARRAY_SIZE(tahiti_mgcg_cgcg_init));
2058 		amdgpu_device_program_register_sequence(adev,
2059 							tahiti_golden_registers2,
2060 							ARRAY_SIZE(tahiti_golden_registers2));
2061 		break;
2062 	case CHIP_PITCAIRN:
2063 		amdgpu_device_program_register_sequence(adev,
2064 							pitcairn_golden_registers,
2065 							ARRAY_SIZE(pitcairn_golden_registers));
2066 		amdgpu_device_program_register_sequence(adev,
2067 							pitcairn_golden_rlc_registers,
2068 							ARRAY_SIZE(pitcairn_golden_rlc_registers));
2069 		amdgpu_device_program_register_sequence(adev,
2070 							pitcairn_mgcg_cgcg_init,
2071 							ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
2072 		break;
2073 	case CHIP_VERDE:
2074 		amdgpu_device_program_register_sequence(adev,
2075 							verde_golden_registers,
2076 							ARRAY_SIZE(verde_golden_registers));
2077 		amdgpu_device_program_register_sequence(adev,
2078 							verde_golden_rlc_registers,
2079 							ARRAY_SIZE(verde_golden_rlc_registers));
2080 		amdgpu_device_program_register_sequence(adev,
2081 							verde_mgcg_cgcg_init,
2082 							ARRAY_SIZE(verde_mgcg_cgcg_init));
2083 		amdgpu_device_program_register_sequence(adev,
2084 							verde_pg_init,
2085 							ARRAY_SIZE(verde_pg_init));
2086 		break;
2087 	case CHIP_OLAND:
2088 		amdgpu_device_program_register_sequence(adev,
2089 							oland_golden_registers,
2090 							ARRAY_SIZE(oland_golden_registers));
2091 		amdgpu_device_program_register_sequence(adev,
2092 							oland_golden_rlc_registers,
2093 							ARRAY_SIZE(oland_golden_rlc_registers));
2094 		amdgpu_device_program_register_sequence(adev,
2095 							oland_mgcg_cgcg_init,
2096 							ARRAY_SIZE(oland_mgcg_cgcg_init));
2097 		break;
2098 	case CHIP_HAINAN:
2099 		amdgpu_device_program_register_sequence(adev,
2100 							hainan_golden_registers,
2101 							ARRAY_SIZE(hainan_golden_registers));
2102 		amdgpu_device_program_register_sequence(adev,
2103 							hainan_golden_registers2,
2104 							ARRAY_SIZE(hainan_golden_registers2));
2105 		amdgpu_device_program_register_sequence(adev,
2106 							hainan_mgcg_cgcg_init,
2107 							ARRAY_SIZE(hainan_mgcg_cgcg_init));
2108 		break;
2109 
2110 
2111 	default:
2112 		BUG();
2113 	}
2114 }
2115 
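/*
 * Bring the PCIe link up to the fastest speed both the GPU and the
 * platform support.  Skipped on APUs, on root-bus devices and when the
 * amdgpu.pcie_gen2 module parameter is 0.  For a gen3 transition the
 * bridge and the GPU are walked through the equalization handshake
 * (LC_SET_QUIESCE / LC_REDO_EQ) before the new target link speed is
 * written to PCI_EXP_LNKCTL2 and a software speed change is initiated.
 */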
2116 static void si_pcie_gen3_enable(struct amdgpu_device *adev)
2117 {
2118 	struct pci_dev *root = adev->pdev->bus->self;
2119 	u32 speed_cntl, current_data_rate;
2120 	int i;
2121 	u16 tmp16;
2122 
2123 	if (pci_is_root_bus(adev->pdev->bus))
2124 		return;
2125 
2126 	if (amdgpu_pcie_gen2 == 0)
2127 		return;
2128 
2129 	if (adev->flags & AMD_IS_APU)
2130 		return;
2131 
2132 	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2133 					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
2134 		return;
2135 
2136 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
2137 	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
2138 		LC_CURRENT_DATA_RATE_SHIFT;
2139 	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
2140 		if (current_data_rate == 2) {
2141 			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
2142 			return;
2143 		}
2144 		DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
2145 	} else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
2146 		if (current_data_rate == 1) {
2147 			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
2148 			return;
2149 		}
2150 		DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
2151 	}
2152 
2153 	if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
2154 		return;
2155 
2156 	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
2157 		if (current_data_rate != 2) {
2158 			u16 bridge_cfg, gpu_cfg;
2159 			u16 bridge_cfg2, gpu_cfg2;
2160 			u32 max_lw, current_lw, tmp;
2161 
2162 			pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
2163 			pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
2164 
2165 			tmp = RREG32_PCIE(PCIE_LC_STATUS1);
2166 			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
2167 			current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
2168 
2169 			if (current_lw < max_lw) {
2170 				tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
2171 				if (tmp & LC_RENEGOTIATION_SUPPORT) {
2172 					tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
2173 					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
2174 					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
2175 					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
2176 				}
2177 			}
2178 
2179 			for (i = 0; i < 10; i++) {
2180 				pcie_capability_read_word(adev->pdev,
2181 							  PCI_EXP_DEVSTA,
2182 							  &tmp16);
2183 				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
2184 					break;
2185 
2186 				pcie_capability_read_word(root, PCI_EXP_LNKCTL,
2187 							  &bridge_cfg);
2188 				pcie_capability_read_word(adev->pdev,
2189 							  PCI_EXP_LNKCTL,
2190 							  &gpu_cfg);
2191 
2192 				pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
2193 							  &bridge_cfg2);
2194 				pcie_capability_read_word(adev->pdev,
2195 							  PCI_EXP_LNKCTL2,
2196 							  &gpu_cfg2);
2197 
2198 				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
2199 				tmp |= LC_SET_QUIESCE;
2200 				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
2201 
2202 				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
2203 				tmp |= LC_REDO_EQ;
2204 				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
2205 
2206 				mdelay(100);
2207 
2208 				pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
2209 								   PCI_EXP_LNKCTL_HAWD,
2210 								   bridge_cfg &
2211 								   PCI_EXP_LNKCTL_HAWD);
2212 				pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL,
2213 								   PCI_EXP_LNKCTL_HAWD,
2214 								   gpu_cfg &
2215 								   PCI_EXP_LNKCTL_HAWD);
2216 
2217 				pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
2218 							  &tmp16);
2219 				tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
2220 					   PCI_EXP_LNKCTL2_TX_MARGIN);
2221 				tmp16 |= (bridge_cfg2 &
2222 					  (PCI_EXP_LNKCTL2_ENTER_COMP |
2223 					   PCI_EXP_LNKCTL2_TX_MARGIN));
2224 				pcie_capability_write_word(root,
2225 							   PCI_EXP_LNKCTL2,
2226 							   tmp16);
2227 
2228 				pcie_capability_read_word(adev->pdev,
2229 							  PCI_EXP_LNKCTL2,
2230 							  &tmp16);
2231 				tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
2232 					   PCI_EXP_LNKCTL2_TX_MARGIN);
2233 				tmp16 |= (gpu_cfg2 &
2234 					  (PCI_EXP_LNKCTL2_ENTER_COMP |
2235 					   PCI_EXP_LNKCTL2_TX_MARGIN));
2236 				pcie_capability_write_word(adev->pdev,
2237 							   PCI_EXP_LNKCTL2,
2238 							   tmp16);
2239 
2240 				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
2241 				tmp &= ~LC_SET_QUIESCE;
2242 				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
2243 			}
2244 		}
2245 	}
2246 
2247 	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
2248 	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
2249 	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
2250 
2251 	pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
2252 	tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
2253 
2254 	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2255 		tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
2256 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
2257 		tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
2258 	else
2259 		tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
2260 	pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
2261 
2262 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
2263 	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
2264 	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
2265 
2266 	for (i = 0; i < adev->usec_timeout; i++) {
2267 		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
2268 		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
2269 			break;
2270 		udelay(1);
2271 	}
2272 }
2273 
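/*
 * The PIF PHY registers are reached indirectly through an index/data
 * register pair; the accessors below serialize the two-step access with
 * the pcie_idx_lock spinlock.
 */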
2274 static inline u32 si_pif_phy0_rreg(struct amdgpu_device *adev, u32 reg)
2275 {
2276 	unsigned long flags;
2277 	u32 r;
2278 
2279 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
2280 	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
2281 	r = RREG32(EVERGREEN_PIF_PHY0_DATA);
2282 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
2283 	return r;
2284 }
2285 
2286 static inline void si_pif_phy0_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
2287 {
2288 	unsigned long flags;
2289 
2290 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
2291 	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
2292 	WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
2293 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
2294 }
2295 
2296 static inline u32 si_pif_phy1_rreg(struct amdgpu_device *adev, u32 reg)
2297 {
2298 	unsigned long flags;
2299 	u32 r;
2300 
2301 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
2302 	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
2303 	r = RREG32(EVERGREEN_PIF_PHY1_DATA);
2304 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
2305 	return r;
2306 }
2307 
2308 static inline void si_pif_phy1_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
2309 {
2310 	unsigned long flags;
2311 
2312 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
2313 	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
2314 	WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
2315 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
2316 }
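
/*
 * Program PCIe Active State Power Management: L0s/L1 inactivity timers,
 * PLL power-down while the link is in L1 and, when the upstream bridge
 * advertises clock power management (PCI_EXP_LNKCAP_CLKPM), power-down in
 * L1/L2-L3 plus reprogramming of several clock source selects.  Skipped
 * on APUs and when the amdgpu.aspm module parameter is 0.
 */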
2317 static void si_program_aspm(struct amdgpu_device *adev)
2318 {
2319 	u32 data, orig;
2320 	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
2321 	bool disable_clkreq = false;
2322 
2323 	if (amdgpu_aspm == 0)
2324 		return;
2325 
2326 	if (adev->flags & AMD_IS_APU)
2327 		return;
2328 	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
2329 	data &= ~LC_XMIT_N_FTS_MASK;
2330 	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
2331 	if (orig != data)
2332 		WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
2333 
2334 	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
2335 	data |= LC_GO_TO_RECOVERY;
2336 	if (orig != data)
2337 		WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
2338 
2339 	orig = data = RREG32_PCIE(PCIE_P_CNTL);
2340 	data |= P_IGNORE_EDB_ERR;
2341 	if (orig != data)
2342 		WREG32_PCIE(PCIE_P_CNTL, data);
2343 
2344 	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
2345 	data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
2346 	data |= LC_PMI_TO_L1_DIS;
2347 	if (!disable_l0s)
2348 		data |= LC_L0S_INACTIVITY(7);
2349 
2350 	if (!disable_l1) {
2351 		data |= LC_L1_INACTIVITY(7);
2352 		data &= ~LC_PMI_TO_L1_DIS;
2353 		if (orig != data)
2354 			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
2355 
2356 		if (!disable_plloff_in_l1) {
2357 			bool clk_req_support;
2358 
2359 			orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
2360 			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
2361 			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
2362 			if (orig != data)
2363 				si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data);
2364 
2365 			orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1);
2366 			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
2367 			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
2368 			if (orig != data)
2369 				si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data);
2370 
2371 			orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0);
2372 			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
2373 			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
2374 			if (orig != data)
2375 				si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data);
2376 
2377 			orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1);
2378 			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
2379 			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
2380 			if (orig != data)
2381 				si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
2382 
2383 			if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
2384 				orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
2385 				data &= ~PLL_RAMP_UP_TIME_0_MASK;
2386 				if (orig != data)
2387 					si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data);
2388 
2389 				orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1);
2390 				data &= ~PLL_RAMP_UP_TIME_1_MASK;
2391 				if (orig != data)
2392 					si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data);
2393 
2394 				orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_2);
2395 				data &= ~PLL_RAMP_UP_TIME_2_MASK;
2396 				if (orig != data)
2397 					si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_2, data);
2398 
2399 				orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_3);
2400 				data &= ~PLL_RAMP_UP_TIME_3_MASK;
2401 				if (orig != data)
2402 					si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_3, data);
2403 
2404 				orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0);
2405 				data &= ~PLL_RAMP_UP_TIME_0_MASK;
2406 				if (orig != data)
2407 					si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data);
2408 
2409 				orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1);
2410 				data &= ~PLL_RAMP_UP_TIME_1_MASK;
2411 				if (orig != data)
2412 					si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
2413 
2414 				orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_2);
2415 				data &= ~PLL_RAMP_UP_TIME_2_MASK;
2416 				if (orig != data)
2417 					si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_2, data);
2418 
2419 				orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_3);
2420 				data &= ~PLL_RAMP_UP_TIME_3_MASK;
2421 				if (orig != data)
2422 					si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_3, data);
2423 			}
2424 			orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
2425 			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
2426 			data |= LC_DYN_LANES_PWR_STATE(3);
2427 			if (orig != data)
2428 				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
2429 
2430 			orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
2431 			data &= ~LS2_EXIT_TIME_MASK;
2432 			if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
2433 				data |= LS2_EXIT_TIME(5);
2434 			if (orig != data)
2435 				si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
2436 
2437 			orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
2438 			data &= ~LS2_EXIT_TIME_MASK;
2439 			if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
2440 				data |= LS2_EXIT_TIME(5);
2441 			if (orig != data)
2442 				si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
2443 
2444 			if (!disable_clkreq &&
2445 			    !pci_is_root_bus(adev->pdev->bus)) {
2446 				struct pci_dev *root = adev->pdev->bus->self;
2447 				u32 lnkcap;
2448 
2449 				clk_req_support = false;
2450 				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
2451 				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
2452 					clk_req_support = true;
2453 			} else {
2454 				clk_req_support = false;
2455 			}
2456 
2457 			if (clk_req_support) {
2458 				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
2459 				data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
2460 				if (orig != data)
2461 					WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
2462 
2463 				orig = data = RREG32(THM_CLK_CNTL);
2464 				data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
2465 				data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
2466 				if (orig != data)
2467 					WREG32(THM_CLK_CNTL, data);
2468 
2469 				orig = data = RREG32(MISC_CLK_CNTL);
2470 				data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
2471 				data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
2472 				if (orig != data)
2473 					WREG32(MISC_CLK_CNTL, data);
2474 
2475 				orig = data = RREG32(CG_CLKPIN_CNTL);
2476 				data &= ~BCLK_AS_XCLK;
2477 				if (orig != data)
2478 					WREG32(CG_CLKPIN_CNTL, data);
2479 
2480 				orig = data = RREG32(CG_CLKPIN_CNTL_2);
2481 				data &= ~FORCE_BIF_REFCLK_EN;
2482 				if (orig != data)
2483 					WREG32(CG_CLKPIN_CNTL_2, data);
2484 
2485 				orig = data = RREG32(MPLL_BYPASSCLK_SEL);
2486 				data &= ~MPLL_CLKOUT_SEL_MASK;
2487 				data |= MPLL_CLKOUT_SEL(4);
2488 				if (orig != data)
2489 					WREG32(MPLL_BYPASSCLK_SEL, data);
2490 
2491 				orig = data = RREG32(SPLL_CNTL_MODE);
2492 				data &= ~SPLL_REFCLK_SEL_MASK;
2493 				if (orig != data)
2494 					WREG32(SPLL_CNTL_MODE, data);
2495 			}
2496 		}
2497 	} else {
2498 		if (orig != data)
2499 			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
2500 	}
2501 
2502 	orig = data = RREG32_PCIE(PCIE_CNTL2);
2503 	data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
2504 	if (orig != data)
2505 		WREG32_PCIE(PCIE_CNTL2, data);
2506 
2507 	if (!disable_l0s) {
2508 		data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
2509 		if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
2510 			data = RREG32_PCIE(PCIE_LC_STATUS1);
2511 			if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
2512 				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
2513 				data &= ~LC_L0S_INACTIVITY_MASK;
2514 				if (orig != data)
2515 					WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
2516 			}
2517 		}
2518 	}
2519 }
2520 
2521 static void si_fix_pci_max_read_req_size(struct amdgpu_device *adev)
2522 {
2523 	int readrq;
2524 	u16 v;
2525 
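	/*
	 * pcie_get_readrq() returns the MRRS in bytes (a power of two
	 * between 128 and 4096); ffs(readrq) - 8 turns that back into the
	 * PCIe encoding (128 -> 0, 256 -> 1, ..., 4096 -> 5).  Encodings of
	 * 0, 6 and 7 are rejected here, presumably because the hardware
	 * does not cope with them, and a safe 512-byte MRRS is forced
	 * instead.
	 */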
2526 	readrq = pcie_get_readrq(adev->pdev);
2527 	v = ffs(readrq) - 8;
2528 	if ((v == 0) || (v == 6) || (v == 7))
2529 		pcie_set_readrq(adev->pdev, 512);
2530 }
2531 
2532 static int si_common_hw_init(void *handle)
2533 {
2534 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2535 
2536 	si_fix_pci_max_read_req_size(adev);
2537 	si_init_golden_registers(adev);
2538 	si_pcie_gen3_enable(adev);
2539 	si_program_aspm(adev);
2540 
2541 	return 0;
2542 }
2543 
2544 static int si_common_hw_fini(void *handle)
2545 {
2546 	return 0;
2547 }
2548 
2549 static int si_common_suspend(void *handle)
2550 {
2551 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2552 
2553 	return si_common_hw_fini(adev);
2554 }
2555 
2556 static int si_common_resume(void *handle)
2557 {
2558 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2559 
2560 	return si_common_hw_init(adev);
2561 }
2562 
2563 static bool si_common_is_idle(void *handle)
2564 {
2565 	return true;
2566 }
2567 
2568 static int si_common_wait_for_idle(void *handle)
2569 {
2570 	return 0;
2571 }
2572 
2573 static int si_common_soft_reset(void *handle)
2574 {
2575 	return 0;
2576 }
2577 
2578 static int si_common_set_clockgating_state(void *handle,
2579 					    enum amd_clockgating_state state)
2580 {
2581 	return 0;
2582 }
2583 
2584 static int si_common_set_powergating_state(void *handle,
2585 					    enum amd_powergating_state state)
2586 {
2587 	return 0;
2588 }
2589 
2590 static const struct amd_ip_funcs si_common_ip_funcs = {
2591 	.name = "si_common",
2592 	.early_init = si_common_early_init,
2593 	.late_init = NULL,
2594 	.sw_init = si_common_sw_init,
2595 	.sw_fini = si_common_sw_fini,
2596 	.hw_init = si_common_hw_init,
2597 	.hw_fini = si_common_hw_fini,
2598 	.suspend = si_common_suspend,
2599 	.resume = si_common_resume,
2600 	.is_idle = si_common_is_idle,
2601 	.wait_for_idle = si_common_wait_for_idle,
2602 	.soft_reset = si_common_soft_reset,
2603 	.set_clockgating_state = si_common_set_clockgating_state,
2604 	.set_powergating_state = si_common_set_powergating_state,
2605 };
2606 
2607 static const struct amdgpu_ip_block_version si_common_ip_block =
2608 {
2609 	.type = AMD_IP_BLOCK_TYPE_COMMON,
2610 	.major = 1,
2611 	.minor = 0,
2612 	.rev = 0,
2613 	.funcs = &si_common_ip_funcs,
2614 };
2615 
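/*
 * Register the IP blocks for each SI variant.  Blocks are brought up in
 * the order they are added: common, GMC, IH, GFX, DMA, SMU, then display
 * (virtual, DC where built in, or DCE 6.x) and finally UVD on the parts
 * that have it; Hainan gets no display or UVD block apart from the
 * optional virtual display.
 */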
2616 int si_set_ip_blocks(struct amdgpu_device *adev)
2617 {
2618 	switch (adev->asic_type) {
2619 	case CHIP_VERDE:
2620 	case CHIP_TAHITI:
2621 	case CHIP_PITCAIRN:
2622 		amdgpu_device_ip_block_add(adev, &si_common_ip_block);
2623 		amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
2624 		amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
2625 		amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
2626 		amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
2627 		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
2628 		if (adev->enable_virtual_display)
2629 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2630 #if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI)
2631 		else if (amdgpu_device_has_dc_support(adev))
2632 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
2633 #endif
2634 		else
2635 			amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
2636 		amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
2637 		/* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
2638 		break;
2639 	case CHIP_OLAND:
2640 		amdgpu_device_ip_block_add(adev, &si_common_ip_block);
2641 		amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
2642 		amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
2643 		amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
2644 		amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
2645 		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
2646 		if (adev->enable_virtual_display)
2647 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2648 #if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI)
2649 		else if (amdgpu_device_has_dc_support(adev))
2650 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
2651 #endif
2652 		else
2653 			amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block);
2654 		amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
2655 		/* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
2656 		break;
2657 	case CHIP_HAINAN:
2658 		amdgpu_device_ip_block_add(adev, &si_common_ip_block);
2659 		amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
2660 		amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
2661 		amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
2662 		amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
2663 		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
2664 		if (adev->enable_virtual_display)
2665 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
2666 		break;
2667 	default:
2668 		BUG();
2669 	}
2670 	return 0;
2671 }
2672 
2673