/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>

#include "atom.h"
#include "avivod.h"
#include "cik.h"
#include "ni.h"
#include "rv770.h"
#include "evergreen.h"
#include "evergreen_blit_shaders.h"
#include "evergreen_reg.h"
#include "evergreend.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include "radeon_ucode.h"
#include "si.h"

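/* The DC_HPDx register blocks sit at a fixed 0xc byte stride, so each
 * hot-plug-detect pin's control/status registers can be derived from the
 * DC_HPD1 base addresses; x is the zero-based HPD pin index.
 */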
#define DC_HPDx_CONTROL(x)        (DC_HPD1_CONTROL     + (x * 0xc))
#define DC_HPDx_INT_CONTROL(x)    (DC_HPD1_INT_CONTROL + (x * 0xc))
#define DC_HPDx_INT_STATUS_REG(x) (DC_HPD1_INT_STATUS  + (x * 0xc))

/*
 * Indirect registers accessor
 */
u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->cg_idx_lock, flags);
	WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
	r = RREG32(EVERGREEN_CG_IND_DATA);
	spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
	return r;
}

void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->cg_idx_lock, flags);
	WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
	WREG32(EVERGREEN_CG_IND_DATA, (v));
	spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
}

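/* PCIE PIF PHY0/PHY1 indirect accessors; each PHY exposes one index/data
 * register pair, so accesses are serialized with pif_idx_lock.
 */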
u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
	r = RREG32(EVERGREEN_PIF_PHY0_DATA);
	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
	return r;
}

void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
	WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
}

u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
	r = RREG32(EVERGREEN_PIF_PHY1_DATA);
	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
	return r;
}

void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
	WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
}

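/* MMIO offsets of the six display controller (CRTC) register blocks */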
static const u32 crtc_offsets[6] =
{
	EVERGREEN_CRTC0_REGISTER_OFFSET,
	EVERGREEN_CRTC1_REGISTER_OFFSET,
	EVERGREEN_CRTC2_REGISTER_OFFSET,
	EVERGREEN_CRTC3_REGISTER_OFFSET,
	EVERGREEN_CRTC4_REGISTER_OFFSET,
	EVERGREEN_CRTC5_REGISTER_OFFSET
};

#include "clearstate_evergreen.h"

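/* GFX register offsets the RLC saves and restores around power gating on
 * Sumo-class parts (see the users of this list in the RLC init code).
 */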
static const u32 sumo_rlc_save_restore_register_list[] =
{
	0x98fc,
	0x9830,
	0x9834,
	0x9838,
	0x9870,
	0x9874,
	0x8a14,
	0x8b24,
	0x8bcc,
	0x8b10,
	0x8d00,
	0x8d04,
	0x8c00,
	0x8c04,
	0x8c08,
	0x8c0c,
	0x8d8c,
	0x8c20,
	0x8c24,
	0x8c28,
	0x8c18,
	0x8c1c,
	0x8cf0,
	0x8e2c,
	0x8e38,
	0x8c30,
	0x9508,
	0x9688,
	0x9608,
	0x960c,
	0x9610,
	0x9614,
	0x88c4,
	0x88d4,
	0xa008,
	0x900c,
	0x9100,
	0x913c,
	0x98f8,
	0x98f4,
	0x9b7c,
	0x3f8c,
	0x8950,
	0x8954,
	0x8a18,
	0x8b28,
	0x9144,
	0x9148,
	0x914c,
	0x3f90,
	0x3f94,
	0x915c,
	0x9160,
	0x9178,
	0x917c,
	0x9180,
	0x918c,
	0x9190,
	0x9194,
	0x9198,
	0x919c,
	0x91a8,
	0x91ac,
	0x91b0,
	0x91b4,
	0x91b8,
	0x91c4,
	0x91c8,
	0x91cc,
	0x91d0,
	0x91d4,
	0x91e0,
	0x91e4,
	0x91ec,
	0x91f0,
	0x91f4,
	0x9200,
	0x9204,
	0x929c,
	0x9150,
	0x802c,
};

static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
void evergreen_program_aspm(struct radeon_device *rdev);

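/* The "golden" register tables below are triples of {offset, mask, value};
 * radeon_program_register_sequence() applies each triple, updating only the
 * bits set in the mask, to load the recommended defaults for each ASIC
 * variant.
 */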
static const u32 evergreen_golden_registers[] =
{
	0x3f90, 0xffff0000, 0xff000000,
	0x9148, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x9b7c, 0xffffffff, 0x00000000,
	0x8a14, 0xffffffff, 0x00000007,
	0x8b10, 0xffffffff, 0x00000000,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0xffffffff, 0x000000c2,
	0x88d4, 0xffffffff, 0x00000010,
	0x8974, 0xffffffff, 0x00000000,
	0xc78, 0x00000080, 0x00000080,
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0xffffffff, 0x001000f0,
	0x6104, 0x01000300, 0x00000000,
	0x5bc0, 0x00300000, 0x00000000,
	0x7030, 0xffffffff, 0x00000011,
	0x7c30, 0xffffffff, 0x00000011,
	0x10830, 0xffffffff, 0x00000011,
	0x11430, 0xffffffff, 0x00000011,
	0x12030, 0xffffffff, 0x00000011,
	0x12c30, 0xffffffff, 0x00000011,
	0xd02c, 0xffffffff, 0x08421000,
	0x240c, 0xffffffff, 0x00000380,
	0x8b24, 0xffffffff, 0x00ff0fff,
	0x28a4c, 0x06000000, 0x06000000,
	0x10c, 0x00000001, 0x00000001,
	0x8d00, 0xffffffff, 0x100e4848,
	0x8d04, 0xffffffff, 0x00164745,
	0x8c00, 0xffffffff, 0xe4000003,
	0x8c04, 0xffffffff, 0x40600060,
	0x8c08, 0xffffffff, 0x001c001c,
	0x8cf0, 0xffffffff, 0x08e00620,
	0x8c20, 0xffffffff, 0x00800080,
	0x8c24, 0xffffffff, 0x00800080,
	0x8c18, 0xffffffff, 0x20202078,
	0x8c1c, 0xffffffff, 0x00001010,
	0x28350, 0xffffffff, 0x00000000,
	0xa008, 0xffffffff, 0x00010000,
	0x5c4, 0xffffffff, 0x00000001,
	0x9508, 0xffffffff, 0x00000002,
	0x913c, 0x0000000f, 0x0000000a
};

static const u32 evergreen_golden_registers2[] =
{
	0x2f4c, 0xffffffff, 0x00000000,
	0x54f4, 0xffffffff, 0x00000000,
	0x54f0, 0xffffffff, 0x00000000,
	0x5498, 0xffffffff, 0x00000000,
	0x549c, 0xffffffff, 0x00000000,
	0x5494, 0xffffffff, 0x00000000,
	0x53cc, 0xffffffff, 0x00000000,
	0x53c8, 0xffffffff, 0x00000000,
	0x53c4, 0xffffffff, 0x00000000,
	0x53c0, 0xffffffff, 0x00000000,
	0x53bc, 0xffffffff, 0x00000000,
	0x53b8, 0xffffffff, 0x00000000,
	0x53b4, 0xffffffff, 0x00000000,
	0x53b0, 0xffffffff, 0x00000000
};

static const u32 cypress_mgcg_init[] =
{
	0x802c, 0xffffffff, 0xc0000000,
	0x5448, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00000100,
	0x160c, 0xffffffff, 0x00000100,
	0x5644, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x9a60, 0xffffffff, 0x00000100,
	0x9868, 0xffffffff, 0x00000100,
	0x8d58, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x9654, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0x9040, 0xffffffff, 0x00000100,
	0xa200, 0xffffffff, 0x00000100,
	0xa204, 0xffffffff, 0x00000100,
	0xa208, 0xffffffff, 0x00000100,
	0xa20c, 0xffffffff, 0x00000100,
	0x971c, 0xffffffff, 0x00000100,
	0x977c, 0xffffffff, 0x00000100,
	0x3f80, 0xffffffff, 0x00000100,
	0xa210, 0xffffffff, 0x00000100,
	0xa214, 0xffffffff, 0x00000100,
	0x4d8, 0xffffffff, 0x00000100,
	0x9784, 0xffffffff, 0x00000100,
	0x9698, 0xffffffff, 0x00000100,
	0x4d4, 0xffffffff, 0x00000200,
	0x30cc, 0xffffffff, 0x00000100,
	0xd0c0, 0xffffffff, 0xff000100,
	0x802c, 0xffffffff, 0x40000000,
	0x915c, 0xffffffff, 0x00010000,
	0x9160, 0xffffffff, 0x00030002,
	0x9178, 0xffffffff, 0x00070000,
	0x917c, 0xffffffff, 0x00030002,
	0x9180, 0xffffffff, 0x00050004,
	0x918c, 0xffffffff, 0x00010006,
	0x9190, 0xffffffff, 0x00090008,
	0x9194, 0xffffffff, 0x00070000,
	0x9198, 0xffffffff, 0x00030002,
	0x919c, 0xffffffff, 0x00050004,
	0x91a8, 0xffffffff, 0x00010006,
	0x91ac, 0xffffffff, 0x00090008,
	0x91b0, 0xffffffff, 0x00070000,
	0x91b4, 0xffffffff, 0x00030002,
	0x91b8, 0xffffffff, 0x00050004,
	0x91c4, 0xffffffff, 0x00010006,
	0x91c8, 0xffffffff, 0x00090008,
	0x91cc, 0xffffffff, 0x00070000,
	0x91d0, 0xffffffff, 0x00030002,
	0x91d4, 0xffffffff, 0x00050004,
	0x91e0, 0xffffffff, 0x00010006,
	0x91e4, 0xffffffff, 0x00090008,
	0x91e8, 0xffffffff, 0x00000000,
	0x91ec, 0xffffffff, 0x00070000,
	0x91f0, 0xffffffff, 0x00030002,
	0x91f4, 0xffffffff, 0x00050004,
	0x9200, 0xffffffff, 0x00010006,
	0x9204, 0xffffffff, 0x00090008,
	0x9208, 0xffffffff, 0x00070000,
	0x920c, 0xffffffff, 0x00030002,
	0x9210, 0xffffffff, 0x00050004,
	0x921c, 0xffffffff, 0x00010006,
	0x9220, 0xffffffff, 0x00090008,
	0x9224, 0xffffffff, 0x00070000,
	0x9228, 0xffffffff, 0x00030002,
	0x922c, 0xffffffff, 0x00050004,
	0x9238, 0xffffffff, 0x00010006,
	0x923c, 0xffffffff, 0x00090008,
	0x9240, 0xffffffff, 0x00070000,
	0x9244, 0xffffffff, 0x00030002,
	0x9248, 0xffffffff, 0x00050004,
	0x9254, 0xffffffff, 0x00010006,
	0x9258, 0xffffffff, 0x00090008,
	0x925c, 0xffffffff, 0x00070000,
	0x9260, 0xffffffff, 0x00030002,
	0x9264, 0xffffffff, 0x00050004,
	0x9270, 0xffffffff, 0x00010006,
	0x9274, 0xffffffff, 0x00090008,
	0x9278, 0xffffffff, 0x00070000,
	0x927c, 0xffffffff, 0x00030002,
	0x9280, 0xffffffff, 0x00050004,
	0x928c, 0xffffffff, 0x00010006,
	0x9290, 0xffffffff, 0x00090008,
	0x9294, 0xffffffff, 0x00000000,
	0x929c, 0xffffffff, 0x00000001,
	0x802c, 0xffffffff, 0x40010000,
	0x915c, 0xffffffff, 0x00010000,
	0x9160, 0xffffffff, 0x00030002,
	0x9178, 0xffffffff, 0x00070000,
	0x917c, 0xffffffff, 0x00030002,
	0x9180, 0xffffffff, 0x00050004,
	0x918c, 0xffffffff, 0x00010006,
	0x9190, 0xffffffff, 0x00090008,
	0x9194, 0xffffffff, 0x00070000,
	0x9198, 0xffffffff, 0x00030002,
	0x919c, 0xffffffff, 0x00050004,
	0x91a8, 0xffffffff, 0x00010006,
	0x91ac, 0xffffffff, 0x00090008,
	0x91b0, 0xffffffff, 0x00070000,
	0x91b4, 0xffffffff, 0x00030002,
	0x91b8, 0xffffffff, 0x00050004,
	0x91c4, 0xffffffff, 0x00010006,
	0x91c8, 0xffffffff, 0x00090008,
	0x91cc, 0xffffffff, 0x00070000,
	0x91d0, 0xffffffff, 0x00030002,
	0x91d4, 0xffffffff, 0x00050004,
	0x91e0, 0xffffffff, 0x00010006,
	0x91e4, 0xffffffff, 0x00090008,
	0x91e8, 0xffffffff, 0x00000000,
	0x91ec, 0xffffffff, 0x00070000,
	0x91f0, 0xffffffff, 0x00030002,
	0x91f4, 0xffffffff, 0x00050004,
	0x9200, 0xffffffff, 0x00010006,
	0x9204, 0xffffffff, 0x00090008,
	0x9208, 0xffffffff, 0x00070000,
	0x920c, 0xffffffff, 0x00030002,
	0x9210, 0xffffffff, 0x00050004,
	0x921c, 0xffffffff, 0x00010006,
	0x9220, 0xffffffff, 0x00090008,
	0x9224, 0xffffffff, 0x00070000,
	0x9228, 0xffffffff, 0x00030002,
	0x922c, 0xffffffff, 0x00050004,
	0x9238, 0xffffffff, 0x00010006,
	0x923c, 0xffffffff, 0x00090008,
	0x9240, 0xffffffff, 0x00070000,
	0x9244, 0xffffffff, 0x00030002,
	0x9248, 0xffffffff, 0x00050004,
	0x9254, 0xffffffff, 0x00010006,
	0x9258, 0xffffffff, 0x00090008,
	0x925c, 0xffffffff, 0x00070000,
	0x9260, 0xffffffff, 0x00030002,
	0x9264, 0xffffffff, 0x00050004,
	0x9270, 0xffffffff, 0x00010006,
	0x9274, 0xffffffff, 0x00090008,
	0x9278, 0xffffffff, 0x00070000,
	0x927c, 0xffffffff, 0x00030002,
	0x9280, 0xffffffff, 0x00050004,
	0x928c, 0xffffffff, 0x00010006,
	0x9290, 0xffffffff, 0x00090008,
	0x9294, 0xffffffff, 0x00000000,
	0x929c, 0xffffffff, 0x00000001,
	0x802c, 0xffffffff, 0xc0000000
};

static const u32 redwood_mgcg_init[] =
{
	0x802c, 0xffffffff, 0xc0000000,
	0x5448, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00000100,
	0x160c, 0xffffffff, 0x00000100,
	0x5644, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x9a60, 0xffffffff, 0x00000100,
	0x9868, 0xffffffff, 0x00000100,
	0x8d58, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x9654, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0x9040, 0xffffffff, 0x00000100,
	0xa200, 0xffffffff, 0x00000100,
	0xa204, 0xffffffff, 0x00000100,
	0xa208, 0xffffffff, 0x00000100,
	0xa20c, 0xffffffff, 0x00000100,
	0x971c, 0xffffffff, 0x00000100,
	0x977c, 0xffffffff, 0x00000100,
	0x3f80, 0xffffffff, 0x00000100,
	0xa210, 0xffffffff, 0x00000100,
	0xa214, 0xffffffff, 0x00000100,
	0x4d8, 0xffffffff, 0x00000100,
	0x9784, 0xffffffff, 0x00000100,
	0x9698, 0xffffffff, 0x00000100,
	0x4d4, 0xffffffff, 0x00000200,
	0x30cc, 0xffffffff, 0x00000100,
	0xd0c0, 0xffffffff, 0xff000100,
	0x802c, 0xffffffff, 0x40000000,
	0x915c, 0xffffffff, 0x00010000,
	0x9160, 0xffffffff, 0x00030002,
	0x9178, 0xffffffff, 0x00070000,
	0x917c, 0xffffffff, 0x00030002,
	0x9180, 0xffffffff, 0x00050004,
	0x918c, 0xffffffff, 0x00010006,
	0x9190, 0xffffffff, 0x00090008,
	0x9194, 0xffffffff, 0x00070000,
	0x9198, 0xffffffff, 0x00030002,
	0x919c, 0xffffffff, 0x00050004,
	0x91a8, 0xffffffff, 0x00010006,
	0x91ac, 0xffffffff, 0x00090008,
	0x91b0, 0xffffffff, 0x00070000,
	0x91b4, 0xffffffff, 0x00030002,
	0x91b8, 0xffffffff, 0x00050004,
	0x91c4, 0xffffffff, 0x00010006,
	0x91c8, 0xffffffff, 0x00090008,
	0x91cc, 0xffffffff, 0x00070000,
	0x91d0, 0xffffffff, 0x00030002,
	0x91d4, 0xffffffff, 0x00050004,
	0x91e0, 0xffffffff, 0x00010006,
	0x91e4, 0xffffffff, 0x00090008,
	0x91e8, 0xffffffff, 0x00000000,
	0x91ec, 0xffffffff, 0x00070000,
	0x91f0, 0xffffffff, 0x00030002,
	0x91f4, 0xffffffff, 0x00050004,
	0x9200, 0xffffffff, 0x00010006,
	0x9204, 0xffffffff, 0x00090008,
	0x9294, 0xffffffff, 0x00000000,
	0x929c, 0xffffffff, 0x00000001,
	0x802c, 0xffffffff, 0xc0000000
};

static const u32 cedar_golden_registers[] =
{
	0x3f90, 0xffff0000, 0xff000000,
	0x9148, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x9b7c, 0xffffffff, 0x00000000,
	0x8a14, 0xffffffff, 0x00000007,
	0x8b10, 0xffffffff, 0x00000000,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0xffffffff, 0x000000c2,
	0x88d4, 0xffffffff, 0x00000000,
	0x8974, 0xffffffff, 0x00000000,
	0xc78, 0x00000080, 0x00000080,
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0xffffffff, 0x001000f0,
	0x6104, 0x01000300, 0x00000000,
	0x5bc0, 0x00300000, 0x00000000,
	0x7030, 0xffffffff, 0x00000011,
	0x7c30, 0xffffffff, 0x00000011,
	0x10830, 0xffffffff, 0x00000011,
	0x11430, 0xffffffff, 0x00000011,
	0xd02c, 0xffffffff, 0x08421000,
	0x240c, 0xffffffff, 0x00000380,
	0x8b24, 0xffffffff, 0x00ff0fff,
	0x28a4c, 0x06000000, 0x06000000,
	0x10c, 0x00000001, 0x00000001,
	0x8d00, 0xffffffff, 0x100e4848,
	0x8d04, 0xffffffff, 0x00164745,
	0x8c00, 0xffffffff, 0xe4000003,
	0x8c04, 0xffffffff, 0x40600060,
	0x8c08, 0xffffffff, 0x001c001c,
	0x8cf0, 0xffffffff, 0x08e00410,
	0x8c20, 0xffffffff, 0x00800080,
	0x8c24, 0xffffffff, 0x00800080,
	0x8c18, 0xffffffff, 0x20202078,
	0x8c1c, 0xffffffff, 0x00001010,
	0x28350, 0xffffffff, 0x00000000,
	0xa008, 0xffffffff, 0x00010000,
	0x5c4, 0xffffffff, 0x00000001,
	0x9508, 0xffffffff, 0x00000002
};

static const u32 cedar_mgcg_init[] =
{
	0x802c, 0xffffffff, 0xc0000000,
	0x5448, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00000100,
	0x160c, 0xffffffff, 0x00000100,
	0x5644, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x9a60, 0xffffffff, 0x00000100,
	0x9868, 0xffffffff, 0x00000100,
	0x8d58, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x9654, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0x9040, 0xffffffff, 0x00000100,
	0xa200, 0xffffffff, 0x00000100,
	0xa204, 0xffffffff, 0x00000100,
	0xa208, 0xffffffff, 0x00000100,
	0xa20c, 0xffffffff, 0x00000100,
	0x971c, 0xffffffff, 0x00000100,
	0x977c, 0xffffffff, 0x00000100,
	0x3f80, 0xffffffff, 0x00000100,
	0xa210, 0xffffffff, 0x00000100,
	0xa214, 0xffffffff, 0x00000100,
	0x4d8, 0xffffffff, 0x00000100,
	0x9784, 0xffffffff, 0x00000100,
	0x9698, 0xffffffff, 0x00000100,
	0x4d4, 0xffffffff, 0x00000200,
	0x30cc, 0xffffffff, 0x00000100,
	0xd0c0, 0xffffffff, 0xff000100,
	0x802c, 0xffffffff, 0x40000000,
	0x915c, 0xffffffff, 0x00010000,
	0x9178, 0xffffffff, 0x00050000,
	0x917c, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00010004,
	0x9190, 0xffffffff, 0x00070006,
	0x9194, 0xffffffff, 0x00050000,
	0x9198, 0xffffffff, 0x00030002,
	0x91a8, 0xffffffff, 0x00010004,
	0x91ac, 0xffffffff, 0x00070006,
	0x91e8, 0xffffffff, 0x00000000,
	0x9294, 0xffffffff, 0x00000000,
	0x929c, 0xffffffff, 0x00000001,
	0x802c, 0xffffffff, 0xc0000000
};

static const u32 juniper_mgcg_init[] =
{
	0x802c, 0xffffffff, 0xc0000000,
	0x5448, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00000100,
	0x160c, 0xffffffff, 0x00000100,
	0x5644, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x9a60, 0xffffffff, 0x00000100,
	0x9868, 0xffffffff, 0x00000100,
	0x8d58, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x9654, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0x9040, 0xffffffff, 0x00000100,
	0xa200, 0xffffffff, 0x00000100,
	0xa204, 0xffffffff, 0x00000100,
	0xa208, 0xffffffff, 0x00000100,
	0xa20c, 0xffffffff, 0x00000100,
	0x971c, 0xffffffff, 0x00000100,
	0xd0c0, 0xffffffff, 0xff000100,
	0x802c, 0xffffffff, 0x40000000,
	0x915c, 0xffffffff, 0x00010000,
	0x9160, 0xffffffff, 0x00030002,
	0x9178, 0xffffffff, 0x00070000,
	0x917c, 0xffffffff, 0x00030002,
	0x9180, 0xffffffff, 0x00050004,
	0x918c, 0xffffffff, 0x00010006,
	0x9190, 0xffffffff, 0x00090008,
	0x9194, 0xffffffff, 0x00070000,
	0x9198, 0xffffffff, 0x00030002,
	0x919c, 0xffffffff, 0x00050004,
	0x91a8, 0xffffffff, 0x00010006,
	0x91ac, 0xffffffff, 0x00090008,
	0x91b0, 0xffffffff, 0x00070000,
	0x91b4, 0xffffffff, 0x00030002,
	0x91b8, 0xffffffff, 0x00050004,
	0x91c4, 0xffffffff, 0x00010006,
	0x91c8, 0xffffffff, 0x00090008,
	0x91cc, 0xffffffff, 0x00070000,
	0x91d0, 0xffffffff, 0x00030002,
	0x91d4, 0xffffffff, 0x00050004,
	0x91e0, 0xffffffff, 0x00010006,
	0x91e4, 0xffffffff, 0x00090008,
	0x91e8, 0xffffffff, 0x00000000,
	0x91ec, 0xffffffff, 0x00070000,
	0x91f0, 0xffffffff, 0x00030002,
	0x91f4, 0xffffffff, 0x00050004,
	0x9200, 0xffffffff, 0x00010006,
	0x9204, 0xffffffff, 0x00090008,
	0x9208, 0xffffffff, 0x00070000,
	0x920c, 0xffffffff, 0x00030002,
	0x9210, 0xffffffff, 0x00050004,
	0x921c, 0xffffffff, 0x00010006,
	0x9220, 0xffffffff, 0x00090008,
	0x9224, 0xffffffff, 0x00070000,
	0x9228, 0xffffffff, 0x00030002,
	0x922c, 0xffffffff, 0x00050004,
	0x9238, 0xffffffff, 0x00010006,
	0x923c, 0xffffffff, 0x00090008,
	0x9240, 0xffffffff, 0x00070000,
	0x9244, 0xffffffff, 0x00030002,
	0x9248, 0xffffffff, 0x00050004,
	0x9254, 0xffffffff, 0x00010006,
	0x9258, 0xffffffff, 0x00090008,
	0x925c, 0xffffffff, 0x00070000,
	0x9260, 0xffffffff, 0x00030002,
	0x9264, 0xffffffff, 0x00050004,
	0x9270, 0xffffffff, 0x00010006,
	0x9274, 0xffffffff, 0x00090008,
	0x9278, 0xffffffff, 0x00070000,
	0x927c, 0xffffffff, 0x00030002,
	0x9280, 0xffffffff, 0x00050004,
	0x928c, 0xffffffff, 0x00010006,
	0x9290, 0xffffffff, 0x00090008,
	0x9294, 0xffffffff, 0x00000000,
	0x929c, 0xffffffff, 0x00000001,
	0x802c, 0xffffffff, 0xc0000000,
	0x977c, 0xffffffff, 0x00000100,
	0x3f80, 0xffffffff, 0x00000100,
	0xa210, 0xffffffff, 0x00000100,
	0xa214, 0xffffffff, 0x00000100,
	0x4d8, 0xffffffff, 0x00000100,
	0x9784, 0xffffffff, 0x00000100,
	0x9698, 0xffffffff, 0x00000100,
	0x4d4, 0xffffffff, 0x00000200,
	0x30cc, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xc0000000
};

static const u32 supersumo_golden_registers[] =
{
	0x5eb4, 0xffffffff, 0x00000002,
	0x5c4, 0xffffffff, 0x00000001,
	0x7030, 0xffffffff, 0x00000011,
	0x7c30, 0xffffffff, 0x00000011,
	0x6104, 0x01000300, 0x00000000,
	0x5bc0, 0x00300000, 0x00000000,
	0x8c04, 0xffffffff, 0x40600060,
	0x8c08, 0xffffffff, 0x001c001c,
	0x8c20, 0xffffffff, 0x00800080,
	0x8c24, 0xffffffff, 0x00800080,
	0x8c18, 0xffffffff, 0x20202078,
	0x8c1c, 0xffffffff, 0x00001010,
	0x918c, 0xffffffff, 0x00010006,
	0x91a8, 0xffffffff, 0x00010006,
	0x91c4, 0xffffffff, 0x00010006,
	0x91e0, 0xffffffff, 0x00010006,
	0x9200, 0xffffffff, 0x00010006,
	0x9150, 0xffffffff, 0x6e944040,
	0x917c, 0xffffffff, 0x00030002,
	0x9180, 0xffffffff, 0x00050004,
	0x9198, 0xffffffff, 0x00030002,
	0x919c, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00030002,
	0x91b8, 0xffffffff, 0x00050004,
	0x91d0, 0xffffffff, 0x00030002,
	0x91d4, 0xffffffff, 0x00050004,
	0x91f0, 0xffffffff, 0x00030002,
	0x91f4, 0xffffffff, 0x00050004,
	0x915c, 0xffffffff, 0x00010000,
	0x9160, 0xffffffff, 0x00030002,
	0x3f90, 0xffff0000, 0xff000000,
	0x9178, 0xffffffff, 0x00070000,
	0x9194, 0xffffffff, 0x00070000,
	0x91b0, 0xffffffff, 0x00070000,
	0x91cc, 0xffffffff, 0x00070000,
	0x91ec, 0xffffffff, 0x00070000,
	0x9148, 0xffff0000, 0xff000000,
	0x9190, 0xffffffff, 0x00090008,
	0x91ac, 0xffffffff, 0x00090008,
	0x91c8, 0xffffffff, 0x00090008,
	0x91e4, 0xffffffff, 0x00090008,
	0x9204, 0xffffffff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0xffffffff, 0x00000001,
	0x8a18, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x5644, 0xffffffff, 0x00000100,
	0x9b7c, 0xffffffff, 0x00000000,
	0x8030, 0xffffffff, 0x0000100a,
	0x8a14, 0xffffffff, 0x00000007,
	0x8b24, 0xffffffff, 0x00ff0fff,
	0x8b10, 0xffffffff, 0x00000000,
	0x28a4c, 0x06000000, 0x06000000,
	0x4d8, 0xffffffff, 0x00000100,
	0x913c, 0xffff000f, 0x0100000a,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0xffffffff, 0x000000c2,
	0x88d4, 0xffffffff, 0x00000010,
	0x8974, 0xffffffff, 0x00000000,
	0xc78, 0x00000080, 0x00000080,
	0x5e78, 0xffffffff, 0x001000f0,
	0xd02c, 0xffffffff, 0x08421000,
	0xa008, 0xffffffff, 0x00010000,
	0x8d00, 0xffffffff, 0x100e4848,
	0x8d04, 0xffffffff, 0x00164745,
	0x8c00, 0xffffffff, 0xe4000003,
	0x8cf0, 0x1fffffff, 0x08e00620,
	0x28350, 0xffffffff, 0x00000000,
	0x9508, 0xffffffff, 0x00000002
};

static const u32 sumo_golden_registers[] =
{
	0x900c, 0x00ffffff, 0x0017071f,
	0x8c18, 0xffffffff, 0x10101060,
	0x8c1c, 0xffffffff, 0x00001010,
	0x8c30, 0x0000000f, 0x00000005,
	0x9688, 0x0000000f, 0x00000007
};

static const u32 wrestler_golden_registers[] =
{
	0x5eb4, 0xffffffff, 0x00000002,
	0x5c4, 0xffffffff, 0x00000001,
	0x7030, 0xffffffff, 0x00000011,
	0x7c30, 0xffffffff, 0x00000011,
	0x6104, 0x01000300, 0x00000000,
	0x5bc0, 0x00300000, 0x00000000,
	0x918c, 0xffffffff, 0x00010006,
	0x91a8, 0xffffffff, 0x00010006,
	0x9150, 0xffffffff, 0x6e944040,
	0x917c, 0xffffffff, 0x00030002,
	0x9198, 0xffffffff, 0x00030002,
	0x915c, 0xffffffff, 0x00010000,
	0x3f90, 0xffff0000, 0xff000000,
	0x9178, 0xffffffff, 0x00070000,
	0x9194, 0xffffffff, 0x00070000,
	0x9148, 0xffff0000, 0xff000000,
	0x9190, 0xffffffff, 0x00090008,
	0x91ac, 0xffffffff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0xffffffff, 0x00000001,
	0x8a18, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x9b7c, 0xffffffff, 0x00000000,
	0x8030, 0xffffffff, 0x0000100a,
	0x8a14, 0xffffffff, 0x00000001,
	0x8b24, 0xffffffff, 0x00ff0fff,
	0x8b10, 0xffffffff, 0x00000000,
	0x28a4c, 0x06000000, 0x06000000,
	0x4d8, 0xffffffff, 0x00000100,
	0x913c, 0xffff000f, 0x0100000a,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0xffffffff, 0x000000c2,
	0x88d4, 0xffffffff, 0x00000010,
	0x8974, 0xffffffff, 0x00000000,
	0xc78, 0x00000080, 0x00000080,
	0x5e78, 0xffffffff, 0x001000f0,
	0xd02c, 0xffffffff, 0x08421000,
	0xa008, 0xffffffff, 0x00010000,
	0x8d00, 0xffffffff, 0x100e4848,
	0x8d04, 0xffffffff, 0x00164745,
	0x8c00, 0xffffffff, 0xe4000003,
	0x8cf0, 0x1fffffff, 0x08e00410,
	0x28350, 0xffffffff, 0x00000000,
	0x9508, 0xffffffff, 0x00000002,
	0x900c, 0xffffffff, 0x0017071f,
	0x8c18, 0xffffffff, 0x10101060,
	0x8c1c, 0xffffffff, 0x00001010
};

static const u32 barts_golden_registers[] =
{
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0x8f311ff1, 0x001000f0,
	0x3f90, 0xffff0000, 0xff000000,
	0x9148, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0xc78, 0x00000080, 0x00000080,
	0xbd4, 0x70073777, 0x00010001,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x03773777, 0x02011003,
	0x5bc0, 0x00200000, 0x50100000,
	0x98f8, 0x33773777, 0x02011003,
	0x98fc, 0xffffffff, 0x76543210,
	0x7030, 0x31000311, 0x00000011,
	0x2f48, 0x00000007, 0x02011003,
	0x6b28, 0x00000010, 0x00000012,
	0x7728, 0x00000010, 0x00000012,
	0x10328, 0x00000010, 0x00000012,
	0x10f28, 0x00000010, 0x00000012,
	0x11b28, 0x00000010, 0x00000012,
	0x12728, 0x00000010, 0x00000012,
	0x240c, 0x000007ff, 0x00000380,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x10c, 0x00000001, 0x00010003,
	0xa02c, 0xffffffff, 0x0000009b,
	0x913c, 0x0000000f, 0x0100000a,
	0x8d00, 0xffff7f7f, 0x100e4848,
	0x8d04, 0x00ffffff, 0x00164745,
	0x8c00, 0xfffc0003, 0xe4000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c08, 0x00ff00ff, 0x001c001c,
	0x8cf0, 0x1fff1fff, 0x08e00620,
	0x8c20, 0x0fff0fff, 0x00800080,
	0x8c24, 0x0fff0fff, 0x00800080,
	0x8c18, 0xffffffff, 0x20202078,
	0x8c1c, 0x0000ffff, 0x00001010,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0x3700001f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0x001f3ae3, 0x000000c2,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};

static const u32 turks_golden_registers[] =
{
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0x8f311ff1, 0x001000f0,
	0x8c8, 0x00003000, 0x00001070,
	0x8cc, 0x000fffff, 0x00040035,
	0x3f90, 0xffff0000, 0xfff00000,
	0x9148, 0xffff0000, 0xfff00000,
	0x3f94, 0xffff0000, 0xfff00000,
	0x914c, 0xffff0000, 0xfff00000,
	0xc78, 0x00000080, 0x00000080,
	0xbd4, 0x00073007, 0x00010002,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x03773777, 0x02010002,
	0x5bc0, 0x00200000, 0x50100000,
	0x98f8, 0x33773777, 0x00010002,
	0x98fc, 0xffffffff, 0x33221100,
	0x7030, 0x31000311, 0x00000011,
	0x2f48, 0x33773777, 0x00010002,
	0x6b28, 0x00000010, 0x00000012,
	0x7728, 0x00000010, 0x00000012,
	0x10328, 0x00000010, 0x00000012,
	0x10f28, 0x00000010, 0x00000012,
	0x11b28, 0x00000010, 0x00000012,
	0x12728, 0x00000010, 0x00000012,
	0x240c, 0x000007ff, 0x00000380,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x10c, 0x00000001, 0x00010003,
	0xa02c, 0xffffffff, 0x0000009b,
	0x913c, 0x0000000f, 0x0100000a,
	0x8d00, 0xffff7f7f, 0x100e4848,
	0x8d04, 0x00ffffff, 0x00164745,
	0x8c00, 0xfffc0003, 0xe4000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c08, 0x00ff00ff, 0x001c001c,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x8c20, 0x0fff0fff, 0x00800080,
	0x8c24, 0x0fff0fff, 0x00800080,
	0x8c18, 0xffffffff, 0x20202078,
	0x8c1c, 0x0000ffff, 0x00001010,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0x3700001f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0x001f3ae3, 0x000000c2,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};

static const u32 caicos_golden_registers[] =
{
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0x8f311ff1, 0x001000f0,
	0x8c8, 0x00003420, 0x00001450,
	0x8cc, 0x000fffff, 0x00040035,
	0x3f90, 0xffff0000, 0xfffc0000,
	0x9148, 0xffff0000, 0xfffc0000,
	0x3f94, 0xffff0000, 0xfffc0000,
	0x914c, 0xffff0000, 0xfffc0000,
	0xc78, 0x00000080, 0x00000080,
	0xbd4, 0x00073007, 0x00010001,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x03773777, 0x02010001,
	0x5bc0, 0x00200000, 0x50100000,
	0x98f8, 0x33773777, 0x02010001,
	0x98fc, 0xffffffff, 0x33221100,
	0x7030, 0x31000311, 0x00000011,
	0x2f48, 0x33773777, 0x02010001,
	0x6b28, 0x00000010, 0x00000012,
	0x7728, 0x00000010, 0x00000012,
	0x10328, 0x00000010, 0x00000012,
	0x10f28, 0x00000010, 0x00000012,
	0x11b28, 0x00000010, 0x00000012,
	0x12728, 0x00000010, 0x00000012,
	0x240c, 0x000007ff, 0x00000380,
	0x8a14, 0xf000001f, 0x00000001,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x10c, 0x00000001, 0x00010003,
	0xa02c, 0xffffffff, 0x0000009b,
	0x913c, 0x0000000f, 0x0100000a,
	0x8d00, 0xffff7f7f, 0x100e4848,
	0x8d04, 0x00ffffff, 0x00164745,
	0x8c00, 0xfffc0003, 0xe4000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c08, 0x00ff00ff, 0x001c001c,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x8c20, 0x0fff0fff, 0x00800080,
	0x8c24, 0x0fff0fff, 0x00800080,
	0x8c18, 0xffffffff, 0x20202078,
	0x8c1c, 0x0000ffff, 0x00001010,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0x3700001f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0x001f3ae3, 0x000000c2,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};

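/**
 * evergreen_init_golden_registers - program family-specific golden registers
 *
 * @rdev: radeon_device pointer
 *
 * Apply the golden register and clock gating init sequences that match
 * the detected ASIC family (evergreen+).
 */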
static void evergreen_init_golden_registers(struct radeon_device *rdev)
{
	switch (rdev->family) {
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		radeon_program_register_sequence(rdev,
						 evergreen_golden_registers,
						 (const u32)ARRAY_SIZE(evergreen_golden_registers));
		radeon_program_register_sequence(rdev,
						 evergreen_golden_registers2,
						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
		radeon_program_register_sequence(rdev,
						 cypress_mgcg_init,
						 (const u32)ARRAY_SIZE(cypress_mgcg_init));
		break;
	case CHIP_JUNIPER:
		radeon_program_register_sequence(rdev,
						 evergreen_golden_registers,
						 (const u32)ARRAY_SIZE(evergreen_golden_registers));
		radeon_program_register_sequence(rdev,
						 evergreen_golden_registers2,
						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
		radeon_program_register_sequence(rdev,
						 juniper_mgcg_init,
						 (const u32)ARRAY_SIZE(juniper_mgcg_init));
		break;
	case CHIP_REDWOOD:
		radeon_program_register_sequence(rdev,
						 evergreen_golden_registers,
						 (const u32)ARRAY_SIZE(evergreen_golden_registers));
		radeon_program_register_sequence(rdev,
						 evergreen_golden_registers2,
						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
		radeon_program_register_sequence(rdev,
						 redwood_mgcg_init,
						 (const u32)ARRAY_SIZE(redwood_mgcg_init));
		break;
	case CHIP_CEDAR:
		radeon_program_register_sequence(rdev,
						 cedar_golden_registers,
						 (const u32)ARRAY_SIZE(cedar_golden_registers));
		radeon_program_register_sequence(rdev,
						 evergreen_golden_registers2,
						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
		radeon_program_register_sequence(rdev,
						 cedar_mgcg_init,
						 (const u32)ARRAY_SIZE(cedar_mgcg_init));
		break;
	case CHIP_PALM:
		radeon_program_register_sequence(rdev,
						 wrestler_golden_registers,
						 (const u32)ARRAY_SIZE(wrestler_golden_registers));
		break;
	case CHIP_SUMO:
		radeon_program_register_sequence(rdev,
						 supersumo_golden_registers,
						 (const u32)ARRAY_SIZE(supersumo_golden_registers));
		break;
	case CHIP_SUMO2:
		radeon_program_register_sequence(rdev,
						 supersumo_golden_registers,
						 (const u32)ARRAY_SIZE(supersumo_golden_registers));
		radeon_program_register_sequence(rdev,
						 sumo_golden_registers,
						 (const u32)ARRAY_SIZE(sumo_golden_registers));
		break;
	case CHIP_BARTS:
		radeon_program_register_sequence(rdev,
						 barts_golden_registers,
						 (const u32)ARRAY_SIZE(barts_golden_registers));
		break;
	case CHIP_TURKS:
		radeon_program_register_sequence(rdev,
						 turks_golden_registers,
						 (const u32)ARRAY_SIZE(turks_golden_registers));
		break;
	case CHIP_CAICOS:
		radeon_program_register_sequence(rdev,
						 caicos_golden_registers,
						 (const u32)ARRAY_SIZE(caicos_golden_registers));
		break;
	default:
		break;
	}
}

/**
 * evergreen_get_allowed_info_register - fetch the register for the info ioctl
 *
 * @rdev: radeon_device pointer
 * @reg: register offset in bytes
 * @val: register value
 *
 * Returns 0 for success or -EINVAL for an invalid register
 *
 */
int evergreen_get_allowed_info_register(struct radeon_device *rdev,
					u32 reg, u32 *val)
{
	switch (reg) {
	case GRBM_STATUS:
	case GRBM_STATUS_SE0:
	case GRBM_STATUS_SE1:
	case SRBM_STATUS:
	case SRBM_STATUS2:
	case DMA_STATUS_REG:
	case UVD_STATUS:
		*val = RREG32(reg);
		return 0;
	default:
		return -EINVAL;
	}
}

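/**
 * evergreen_tiling_fields - decode tiling flags into surface parameters
 *
 * @tiling_flags: packed RADEON_TILING_EG_* tiling flags
 * @bankw: bank width, returned as an ADDR_SURF_BANK_WIDTH_* encoding
 * @bankh: bank height, returned as an ADDR_SURF_BANK_HEIGHT_* encoding
 * @mtaspect: macro tile aspect, returned as an ADDR_SURF_MACRO_TILE_ASPECT_*
 *            encoding
 * @tile_split: tile split, returned as the raw field value
 *
 * Unpack the tiling fields and convert them to the ADDR_SURF encodings the
 * hardware expects; unrecognized values fall back to the *_1 encoding.
 */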
void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
			     unsigned *bankh, unsigned *mtaspect,
			     unsigned *tile_split)
{
	*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
	*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
	*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
	*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
	switch (*bankw) {
	default:
	case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
	case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
	case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
	case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
	}
	switch (*bankh) {
	default:
	case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
	case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
	case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
	case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
	}
	switch (*mtaspect) {
	default:
	case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
	case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
	case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
	case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
	}
}

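/*
 * Program a single UVD clock (VCLK or DCLK): look up the dividers in the
 * atom tables, write the post divider and poll the status register for up
 * to a second (100 x 10ms) for the clock to report stable.
 */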
static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
			      u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;

	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					   clock, false, &dividers);
	if (r)
		return r;

	WREG32_P(cntl_reg, dividers.post_div, ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK));

	for (i = 0; i < 100; i++) {
		if (RREG32(status_reg) & DCLK_STATUS)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	int r = 0;
	u32 cg_scratch = RREG32(CG_SCRATCH1);

	r = sumo_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
	if (r)
		goto done;
	cg_scratch &= 0xffff0000;
	cg_scratch |= vclk / 100; /* MHz */

	r = sumo_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
	if (r)
		goto done;
	cg_scratch &= 0x0000ffff;
	cg_scratch |= (dclk / 100) << 16; /* MHz */

done:
	WREG32(CG_SCRATCH1, cg_scratch);

	return r;
}

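/*
 * Reprogram the UPLL for the requested UVD clocks: park VCLK/DCLK on the
 * bypass clock, reprogram the feedback and post dividers, then switch the
 * clock sources back to the PLL once it has settled.
 */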
int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	/* start off with something large */
	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
	int r;

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	/* put PLL in bypass mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);

	if (!vclk || !dclk) {
		/* keep the Bypass mode, put PLL to sleep */
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
		return 0;
	}

	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
					  16384, 0x03FFFFFF, 0, 128, 5,
					  &fb_div, &vclk_div, &dclk_div);
	if (r)
		return r;

	/* set VCO_MODE to 1 */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);

	/* toggle UPLL_SLEEP to 1 then back to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);

	/* deassert UPLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(1);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* assert UPLL_RESET again */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* disable spread spectrum. */
	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);

	/* set feedback divider */
	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);

	/* set ref divider to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);

	if (fb_div < 307200)
		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
	else
		WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);

	/* set PDIV_A and PDIV_B */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
		~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* switch from bypass mode to normal mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}

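/*
 * ffs(readrq) - 8 maps the max read request size in bytes to its 3-bit
 * MRRS encoding (128 bytes -> 0, ..., 4096 bytes -> 5); encodings the
 * hardware does not cope with (0, 6 and 7) are forced back to 512 bytes.
 */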
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
	int readrq;
	u16 v;

	readrq = pcie_get_readrq(rdev->pdev);
	v = ffs(readrq) - 8;
	/* if the BIOS or OS sets MAX_READ_REQUEST_SIZE to an invalid value,
	 * fix it to avoid hangs or performance issues
	 */
	if ((v == 0) || (v == 6) || (v == 7))
		pcie_set_readrq(rdev->pdev, 512);
}

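/**
 * dce4_program_fmt - set up the FMT bit depth/dither block
 *
 * @encoder: encoder whose CRTC FMT block is programmed
 *
 * Pick truncation or spatial dithering based on the monitor bpc and the
 * connector's dither property; LVDS/eDP FMT is set up by atom and analog
 * encoders need no FMT programming, so both are skipped.
 */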
void dce4_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		bpc = radeon_get_monitor_bpc(connector);
		dither = radeon_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
				FMT_SPATIAL_DITHER_EN);
		else
			tmp |= FMT_TRUNCATE_EN;
		break;
	case 8:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
				FMT_RGB_RANDOM_ENABLE |
				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
		else
			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
}

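/* vblank wait helpers: sample the CRTC status/position registers directly */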
static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
{
	if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
		return true;
	else
		return false;
}

static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);

	if (pos1 != pos2)
		return true;
	else
		return false;
}

/**
 * dce4_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	unsigned i = 0;

	if (crtc >= rdev->num_crtc)
		return;

	if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce4_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce4_is_counter_moving(rdev, crtc))
				break;
		}
	}

	while (!dce4_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce4_is_counter_moving(rdev, crtc))
				break;
		}
	}
}

/**
 * evergreen_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address (evergreen+).
 */
void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base,
			 bool async)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	struct drm_framebuffer *fb = radeon_crtc->base.primary->fb;

	/* flip at hsync for async, default is vsync */
	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset,
	       async ? EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
	/* update pitch */
	WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset,
	       fb->pitches[0] / fb->format->cpp[0]);
	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	/* post the write */
	RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset);
}

/**
 * evergreen_page_flip_pending - check if page flip is still pending
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to check
 *
 * Returns the current update pending status.
 */
bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];

	/* Return current update_pending status: */
	return !!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) &
		EVERGREEN_GRPH_SURFACE_UPDATE_PENDING);
}

/* get temperature in millidegrees */
int evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp, toffset;
	int actual_temp = 0;

	if (rdev->family == CHIP_JUNIPER) {
		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
			TOFFSET_SHIFT;
		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
			TS0_ADC_DOUT_SHIFT;

		if (toffset & 0x100)
			actual_temp = temp / 2 - (0x200 - toffset);
		else
			actual_temp = temp / 2 + toffset;

		actual_temp = actual_temp * 1000;

	} else {
		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
			ASIC_T_SHIFT;

		if (temp & 0x400)
			actual_temp = -256;
		else if (temp & 0x200)
			actual_temp = 255;
		else if (temp & 0x100) {
			actual_temp = temp & 0x1ff;
			actual_temp |= ~0x1ff;
		} else
			actual_temp = temp & 0xff;

		actual_temp = (actual_temp * 1000) / 2;
	}

	return actual_temp;
}

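/* get temperature in millidegrees (the raw reading is offset by 49 C) */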
int sumo_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
	int actual_temp = temp - 49;

	return actual_temp * 1000;
}

/**
 * sumo_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (sumo, trinity, SI).
 * Used for profile mode only.
 */
void sumo_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

	/* low,mid sh/mh */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);

	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

	/* high sh/mh */
	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;

	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;
}

/**
 * btc_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (BTC, cayman).
 * Used for profile mode only.
 */
void btc_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
	/* starting with BTC, there is one state that is used for both
	 * MH and SH.  Difference is that we always use the high clock index for
	 * mclk.
	 */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	/* low sh */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	/* mid sh */
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
	/* high sh */
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
	/* low mh */
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	/* mid mh */
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
	/* high mh */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
}

1618 /**
1619  * evergreen_pm_misc - set additional pm hw parameters callback.
1620  *
1621  * @rdev: radeon_device pointer
1622  *
1623  * Set non-clock parameters associated with a power state
1624  * (voltage, etc.) (evergreen+).
1625  */
1626 void evergreen_pm_misc(struct radeon_device *rdev)
1627 {
1628 	int req_ps_idx = rdev->pm.requested_power_state_index;
1629 	int req_cm_idx = rdev->pm.requested_clock_mode_index;
1630 	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
1631 	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
1632 
1633 	if (voltage->type == VOLTAGE_SW) {
1634 		/* 0xff0x are flags rather than an actual voltage */
1635 		if ((voltage->voltage & 0xff00) == 0xff00)
1636 			return;
1637 		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
1638 			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
1639 			rdev->pm.current_vddc = voltage->voltage;
1640 			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
1641 		}
1642 
1643 		/* starting with BTC, there is one state that is used for both
1644 		 * MH and SH.  The difference is that we always use the high
1645 		 * clock index for mclk and vddci.
1646 		 */
1647 		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
1648 		    (rdev->family >= CHIP_BARTS) &&
1649 		    rdev->pm.active_crtc_count &&
1650 		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
1651 		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
1652 			voltage = &rdev->pm.power_state[req_ps_idx].
1653 				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;
1654 
1655 		/* 0xff0x are flags rather than an actual voltage */
1656 		if ((voltage->vddci & 0xff00) == 0xff00)
1657 			return;
1658 		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
1659 			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
1660 			rdev->pm.current_vddci = voltage->vddci;
1661 			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
1662 		}
1663 	}
1664 }
1665 
1666 /**
1667  * evergreen_pm_prepare - pre-power state change callback.
1668  *
1669  * @rdev: radeon_device pointer
1670  *
1671  * Prepare for a power state change (evergreen+).
1672  */
1673 void evergreen_pm_prepare(struct radeon_device *rdev)
1674 {
1675 	struct drm_device *ddev = rdev->ddev;
1676 	struct drm_crtc *crtc;
1677 	struct radeon_crtc *radeon_crtc;
1678 	u32 tmp;
1679 
1680 	/* disable any active CRTCs */
1681 	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
1682 		radeon_crtc = to_radeon_crtc(crtc);
1683 		if (radeon_crtc->enabled) {
1684 			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
1685 			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1686 			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
1687 		}
1688 	}
1689 }
1690 
1691 /**
1692  * evergreen_pm_finish - post-power state change callback.
1693  *
1694  * @rdev: radeon_device pointer
1695  *
1696  * Clean up after a power state change (evergreen+).
1697  */
1698 void evergreen_pm_finish(struct radeon_device *rdev)
1699 {
1700 	struct drm_device *ddev = rdev->ddev;
1701 	struct drm_crtc *crtc;
1702 	struct radeon_crtc *radeon_crtc;
1703 	u32 tmp;
1704 
1705 	/* enable any active CRTCs */
1706 	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
1707 		radeon_crtc = to_radeon_crtc(crtc);
1708 		if (radeon_crtc->enabled) {
1709 			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
1710 			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1711 			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
1712 		}
1713 	}
1714 }
1715 
1716 /**
1717  * evergreen_hpd_sense - hpd sense callback.
1718  *
1719  * @rdev: radeon_device pointer
1720  * @hpd: hpd (hotplug detect) pin
1721  *
1722  * Checks if a digital monitor is connected (evergreen+).
1723  * Returns true if connected, false if not connected.
1724  */
1725 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
1726 {
1727 	if (hpd == RADEON_HPD_NONE)
1728 		return false;
1729 
1730 	return !!(RREG32(DC_HPDx_INT_STATUS_REG(hpd)) & DC_HPDx_SENSE);
1731 }
1732 
1733 /**
1734  * evergreen_hpd_set_polarity - hpd set polarity callback.
1735  *
1736  * @rdev: radeon_device pointer
1737  * @hpd: hpd (hotplug detect) pin
1738  *
1739  * Set the polarity of the hpd pin (evergreen+).
1740  */
1741 void evergreen_hpd_set_polarity(struct radeon_device *rdev,
1742 				enum radeon_hpd_id hpd)
1743 {
1744 	bool connected = evergreen_hpd_sense(rdev, hpd);
1745 
1746 	if (hpd == RADEON_HPD_NONE)
1747 		return;
1748 
1749 	if (connected)
1750 		WREG32_AND(DC_HPDx_INT_CONTROL(hpd), ~DC_HPDx_INT_POLARITY);
1751 	else
1752 		WREG32_OR(DC_HPDx_INT_CONTROL(hpd), DC_HPDx_INT_POLARITY);
1753 }
1754 
1755 /**
1756  * evergreen_hpd_init - hpd setup callback.
1757  *
1758  * @rdev: radeon_device pointer
1759  *
1760  * Setup the hpd pins used by the card (evergreen+).
1761  * Enable the pin, set the polarity, and enable the hpd interrupts.
1762  */
1763 void evergreen_hpd_init(struct radeon_device *rdev)
1764 {
1765 	struct drm_device *dev = rdev->ddev;
1766 	struct drm_connector *connector;
1767 	unsigned enabled = 0;
1768 	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
1769 		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
1770 
1771 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1772 		enum radeon_hpd_id hpd =
1773 			to_radeon_connector(connector)->hpd.hpd;
1774 
1775 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
1776 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
1777 			/* don't try to enable hpd on eDP or LVDS; this avoids breaking
1778 			 * the aux dp channel on iMacs and helps (but does not completely
1779 			 * fix) https://bugzilla.redhat.com/show_bug.cgi?id=726143
1780 			 * It also avoids interrupt storms during dpms.
1781 			 */
1782 			continue;
1783 		}
1784 
1785 		if (hpd == RADEON_HPD_NONE)
1786 			continue;
1787 
1788 		WREG32(DC_HPDx_CONTROL(hpd), tmp);
1789 		enabled |= 1 << hpd;
1790 
1791 		radeon_hpd_set_polarity(rdev, hpd);
1792 	}
1793 	radeon_irq_kms_enable_hpd(rdev, enabled);
1794 }
1795 
1796 /**
1797  * evergreen_hpd_fini - hpd tear down callback.
1798  *
1799  * @rdev: radeon_device pointer
1800  *
1801  * Tear down the hpd pins used by the card (evergreen+).
1802  * Disable the hpd interrupts.
1803  */
1804 void evergreen_hpd_fini(struct radeon_device *rdev)
1805 {
1806 	struct drm_device *dev = rdev->ddev;
1807 	struct drm_connector *connector;
1808 	unsigned disabled = 0;
1809 
1810 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1811 		enum radeon_hpd_id hpd =
1812 			to_radeon_connector(connector)->hpd.hpd;
1813 
1814 		if (hpd == RADEON_HPD_NONE)
1815 			continue;
1816 
1817 		WREG32(DC_HPDx_CONTROL(hpd), 0);
1818 		disabled |= 1 << hpd;
1819 	}
1820 	radeon_irq_kms_disable_hpd(rdev, disabled);
1821 }
1822 
1823 /* watermark setup */
1824 
1825 static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
1826 					struct radeon_crtc *radeon_crtc,
1827 					struct drm_display_mode *mode,
1828 					struct drm_display_mode *other_mode)
1829 {
1830 	u32 tmp, buffer_alloc, i;
1831 	u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
1832 	/*
1833 	 * Line Buffer Setup
1834 	 * There are 3 line buffers, each one shared by 2 display controllers.
1835 	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
1836 	 * the display controllers.  The partitioning is done via one of four
1837 	 * preset allocations specified in bits 2:0:
1838 	 * first display controller
1839 	 *  0 - first half of lb (3840 * 2)
1840 	 *  1 - first 3/4 of lb (5760 * 2)
1841 	 *  2 - whole lb (7680 * 2), other crtc must be disabled
1842 	 *  3 - first 1/4 of lb (1920 * 2)
1843 	 * second display controller
1844 	 *  4 - second half of lb (3840 * 2)
1845 	 *  5 - second 3/4 of lb (5760 * 2)
1846 	 *  6 - whole lb (7680 * 2), other crtc must be disabled
1847 	 *  7 - last 1/4 of lb (1920 * 2)
1848 	 */
1849 	/* this can get tricky if we have two large displays on a paired group
1850 	 * of crtcs.  Ideally for multiple large displays we'd assign them to
1851 	 * non-linked crtcs for maximum line buffer allocation.
1852 	 */
1853 	if (radeon_crtc->base.enabled && mode) {
1854 		if (other_mode) {
1855 			tmp = 0; /* 1/2 */
1856 			buffer_alloc = 1;
1857 		} else {
1858 			tmp = 2; /* whole */
1859 			buffer_alloc = 2;
1860 		}
1861 	} else {
1862 		tmp = 0;
1863 		buffer_alloc = 0;
1864 	}
1865 
1866 	/* second controller of the pair uses second half of the lb */
1867 	if (radeon_crtc->crtc_id % 2)
1868 		tmp += 4;
1869 	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
1870 
1871 	if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
1872 		WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1873 		       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
1874 		for (i = 0; i < rdev->usec_timeout; i++) {
1875 			if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1876 			    DMIF_BUFFERS_ALLOCATED_COMPLETED)
1877 				break;
1878 			udelay(1);
1879 		}
1880 	}
1881 
1882 	if (radeon_crtc->base.enabled && mode) {
1883 		switch (tmp) {
1884 		case 0:
1885 		case 4:
1886 		default:
1887 			if (ASIC_IS_DCE5(rdev))
1888 				return 4096 * 2;
1889 			else
1890 				return 3840 * 2;
1891 		case 1:
1892 		case 5:
1893 			if (ASIC_IS_DCE5(rdev))
1894 				return 6144 * 2;
1895 			else
1896 				return 5760 * 2;
1897 		case 2:
1898 		case 6:
1899 			if (ASIC_IS_DCE5(rdev))
1900 				return 8192 * 2;
1901 			else
1902 				return 7680 * 2;
1903 		case 3:
1904 		case 7:
1905 			if (ASIC_IS_DCE5(rdev))
1906 				return 2048 * 2;
1907 			else
1908 				return 1920 * 2;
1909 		}
1910 	}
1911 
1912 	/* controller not enabled, so no lb used */
1913 	return 0;
1914 }
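
/* A usage note (an observation, not from the original comments): the
 * values returned above are the line buffer capacity in pixels for the
 * chosen preset; evergreen_program_watermarks() divides that lb_size by
 * the source width to get how many whole display lines the buffer holds
 * when it checks latency hiding.
 */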
1915 
1916 u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
1917 {
1918 	u32 tmp = RREG32(MC_SHARED_CHMAP);
1919 
1920 	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1921 	case 0:
1922 	default:
1923 		return 1;
1924 	case 1:
1925 		return 2;
1926 	case 2:
1927 		return 4;
1928 	case 3:
1929 		return 8;
1930 	}
1931 }
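
/* Decode note (an observation about the switch above, not a hardware
 * statement): for the four encodings handled, the channel count is a
 * power of two of the field value, so the switch is equivalent to
 *
 *	return 1 << ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT);
 *
 * for those values; the explicit switch keeps any undefined encoding
 * clamped to a single channel via the default case.
 */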
1932 
1933 struct evergreen_wm_params {
1934 	u32 dram_channels; /* number of dram channels */
1935 	u32 yclk;          /* bandwidth per dram data pin in kHz */
1936 	u32 sclk;          /* engine clock in kHz */
1937 	u32 disp_clk;      /* display clock in kHz */
1938 	u32 src_width;     /* viewport width */
1939 	u32 active_time;   /* active display time in ns */
1940 	u32 blank_time;    /* blank time in ns */
1941 	bool interlaced;    /* mode is interlaced */
1942 	fixed20_12 vsc;    /* vertical scale ratio */
1943 	u32 num_heads;     /* number of active crtcs */
1944 	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
1945 	u32 lb_size;       /* line buffer allocated to pipe */
1946 	u32 vtaps;         /* vertical scaler taps */
1947 };
1948 
1949 static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
1950 {
1951 	/* Calculate DRAM Bandwidth and the part allocated to display. */
1952 	fixed20_12 dram_efficiency; /* 0.7 */
1953 	fixed20_12 yclk, dram_channels, bandwidth;
1954 	fixed20_12 a;
1955 
1956 	a.full = dfixed_const(1000);
1957 	yclk.full = dfixed_const(wm->yclk);
1958 	yclk.full = dfixed_div(yclk, a);
1959 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
1960 	a.full = dfixed_const(10);
1961 	dram_efficiency.full = dfixed_const(7);
1962 	dram_efficiency.full = dfixed_div(dram_efficiency, a);
1963 	bandwidth.full = dfixed_mul(dram_channels, yclk);
1964 	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
1965 
1966 	return dfixed_trunc(bandwidth);
1967 }
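
/* Worked example with illustrative numbers (not from real hardware):
 * yclk = 1000000 (kHz) scales to 1000 after the divide by 1000, and
 * dram_channels * 4 is the effective width factor, so 2 channels give 8.
 * The result is 1000 * 8 * 0.7 = 5600; judging by the ns math in
 * evergreen_latency_watermark() below, the unit works out to bytes per
 * microsecond (MB/s).
 */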
1968 
1969 static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
1970 {
1971 	/* Calculate DRAM Bandwidth and the part allocated to display. */
1972 	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
1973 	fixed20_12 yclk, dram_channels, bandwidth;
1974 	fixed20_12 a;
1975 
1976 	a.full = dfixed_const(1000);
1977 	yclk.full = dfixed_const(wm->yclk);
1978 	yclk.full = dfixed_div(yclk, a);
1979 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
1980 	a.full = dfixed_const(10);
1981 	disp_dram_allocation.full = dfixed_const(3); /* XXX worst-case value 0.3 */
1982 	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
1983 	bandwidth.full = dfixed_mul(dram_channels, yclk);
1984 	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
1985 
1986 	return dfixed_trunc(bandwidth);
1987 }
1988 
1989 static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
1990 {
1991 	/* Calculate the display Data return Bandwidth */
1992 	fixed20_12 return_efficiency; /* 0.8 */
1993 	fixed20_12 sclk, bandwidth;
1994 	fixed20_12 a;
1995 
1996 	a.full = dfixed_const(1000);
1997 	sclk.full = dfixed_const(wm->sclk);
1998 	sclk.full = dfixed_div(sclk, a);
1999 	a.full = dfixed_const(10);
2000 	return_efficiency.full = dfixed_const(8);
2001 	return_efficiency.full = dfixed_div(return_efficiency, a);
2002 	a.full = dfixed_const(32);
2003 	bandwidth.full = dfixed_mul(a, sclk);
2004 	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
2005 
2006 	return dfixed_trunc(bandwidth);
2007 }
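
/* Worked example with illustrative numbers: sclk = 500000 (kHz) scales
 * to 500, and the formula is 32 bytes per clock at 80% efficiency, so
 * 500 * 32 * 0.8 = 12800. evergreen_dmif_request_bandwidth() below has
 * the same shape with disp_clk in place of sclk.
 */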
2008 
2009 static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
2010 {
2011 	/* Calculate the DMIF Request Bandwidth */
2012 	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
2013 	fixed20_12 disp_clk, bandwidth;
2014 	fixed20_12 a;
2015 
2016 	a.full = dfixed_const(1000);
2017 	disp_clk.full = dfixed_const(wm->disp_clk);
2018 	disp_clk.full = dfixed_div(disp_clk, a);
2019 	a.full = dfixed_const(10);
2020 	disp_clk_request_efficiency.full = dfixed_const(8);
2021 	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
2022 	a.full = dfixed_const(32);
2023 	bandwidth.full = dfixed_mul(a, disp_clk);
2024 	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
2025 
2026 	return dfixed_trunc(bandwidth);
2027 }
2028 
2029 static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
2030 {
2031 	/* Calculate the available bandwidth. The display can use this temporarily but not on average. */
2032 	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
2033 	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
2034 	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
2035 
2036 	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
2037 }
2038 
2039 static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
2040 {
2041 	/* Calculate the display mode Average Bandwidth
2042 	 * DisplayMode should contain the source and destination dimensions,
2043 	 * timing, etc.
2044 	 */
2045 	fixed20_12 bpp;
2046 	fixed20_12 line_time;
2047 	fixed20_12 src_width;
2048 	fixed20_12 bandwidth;
2049 	fixed20_12 a;
2050 
2051 	a.full = dfixed_const(1000);
2052 	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
2053 	line_time.full = dfixed_div(line_time, a);
2054 	bpp.full = dfixed_const(wm->bytes_per_pixel);
2055 	src_width.full = dfixed_const(wm->src_width);
2056 	bandwidth.full = dfixed_mul(src_width, bpp);
2057 	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
2058 	bandwidth.full = dfixed_div(bandwidth, line_time);
2059 
2060 	return dfixed_trunc(bandwidth);
2061 }
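
/* Worked example with illustrative 1080p@60 numbers: src_width = 1920,
 * bytes_per_pixel = 4, vsc = 1.0 and active + blank time ~= 14815 ns,
 * which the divide by 1000 turns into ~14.8. That gives
 * 1920 * 4 / 14.8 ~= 518 bytes per microsecond consumed on average.
 */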
2062 
2063 static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
2064 {
2065 	/* First calculate the latency in ns */
2066 	u32 mc_latency = 2000; /* 2000 ns. */
2067 	u32 available_bandwidth = evergreen_available_bandwidth(wm);
2068 	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
2069 	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
2070 	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
2071 	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
2072 		(wm->num_heads * cursor_line_pair_return_time);
2073 	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
2074 	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
2075 	fixed20_12 a, b, c;
2076 
2077 	if (wm->num_heads == 0)
2078 		return 0;
2079 
2080 	a.full = dfixed_const(2);
2081 	b.full = dfixed_const(1);
2082 	if ((wm->vsc.full > a.full) ||
2083 	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
2084 	    (wm->vtaps >= 5) ||
2085 	    ((wm->vsc.full >= a.full) && wm->interlaced))
2086 		max_src_lines_per_dst_line = 4;
2087 	else
2088 		max_src_lines_per_dst_line = 2;
2089 
2090 	a.full = dfixed_const(available_bandwidth);
2091 	b.full = dfixed_const(wm->num_heads);
2092 	a.full = dfixed_div(a, b);
2093 
2094 	lb_fill_bw = min(dfixed_trunc(a), wm->disp_clk * wm->bytes_per_pixel / 1000);
2095 
2096 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
2097 	b.full = dfixed_const(1000);
2098 	c.full = dfixed_const(lb_fill_bw);
2099 	b.full = dfixed_div(c, b);
2100 	a.full = dfixed_div(a, b);
2101 	line_fill_time = dfixed_trunc(a);
2102 
2103 	if (line_fill_time < wm->active_time)
2104 		return latency;
2105 	else
2106 		return latency + (line_fill_time - wm->active_time);
2107 
2108 }
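
/* Worked example with illustrative numbers: available_bandwidth = 5600
 * gives worst_chunk_return_time = 512 * 8 * 1000 / 5600 ~= 731 ns and
 * cursor_line_pair_return_time = 128 * 4 * 1000 / 5600 ~= 91 ns. With
 * one head and disp_clk = 148500 kHz, dc_latency = 40000000 / 148500
 * ~= 269 ns, so latency ~= 2000 + (2 * 731 + 91) + 269 ~= 3822 ns
 * before any line-fill-time correction is added.
 */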
2109 
2110 static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
2111 {
2112 	if (evergreen_average_bandwidth(wm) <=
2113 	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
2114 		return true;
2115 	else
2116 		return false;
2117 }
2118 
2119 static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
2120 {
2121 	if (evergreen_average_bandwidth(wm) <=
2122 	    (evergreen_available_bandwidth(wm) / wm->num_heads))
2123 		return true;
2124 	else
2125 		return false;
2126 }
2127 
2128 static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
2129 {
2130 	u32 lb_partitions = wm->lb_size / wm->src_width;
2131 	u32 line_time = wm->active_time + wm->blank_time;
2132 	u32 latency_tolerant_lines;
2133 	u32 latency_hiding;
2134 	fixed20_12 a;
2135 
2136 	a.full = dfixed_const(1);
2137 	if (wm->vsc.full > a.full)
2138 		latency_tolerant_lines = 1;
2139 	else {
2140 		if (lb_partitions <= (wm->vtaps + 1))
2141 			latency_tolerant_lines = 1;
2142 		else
2143 			latency_tolerant_lines = 2;
2144 	}
2145 
2146 	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
2147 
2148 	if (evergreen_latency_watermark(wm) <= latency_hiding)
2149 		return true;
2150 	else
2151 		return false;
2152 }
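
/* Worked example continuing the illustrative numbers above: lb_size =
 * 7680 (half of a shared line buffer) with src_width = 1920 gives
 * lb_partitions = 4 lines; with vsc <= 1 and vtaps = 1 that allows
 * latency_tolerant_lines = 2, so latency_hiding ~= 2 * 14815 + 1886
 * ~= 31516 ns, comfortably above a ~3822 ns watermark.
 */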
2153 
2154 static void evergreen_program_watermarks(struct radeon_device *rdev,
2155 					 struct radeon_crtc *radeon_crtc,
2156 					 u32 lb_size, u32 num_heads)
2157 {
2158 	struct drm_display_mode *mode = &radeon_crtc->base.mode;
2159 	struct evergreen_wm_params wm_low, wm_high;
2160 	u32 dram_channels;
2161 	u32 active_time;
2162 	u32 line_time = 0;
2163 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
2164 	u32 priority_a_mark = 0, priority_b_mark = 0;
2165 	u32 priority_a_cnt = PRIORITY_OFF;
2166 	u32 priority_b_cnt = PRIORITY_OFF;
2167 	u32 pipe_offset = radeon_crtc->crtc_id * 16;
2168 	u32 tmp, arb_control3;
2169 	fixed20_12 a, b, c;
2170 
2171 	if (radeon_crtc->base.enabled && num_heads && mode) {
2172 		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
2173 					    (u32)mode->clock);
2174 		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
2175 					  (u32)mode->clock);
2176 		line_time = min(line_time, (u32)65535);
2177 		priority_a_cnt = 0;
2178 		priority_b_cnt = 0;
2179 		dram_channels = evergreen_get_number_of_dram_channels(rdev);
2180 
2181 		/* watermark for high clocks */
2182 		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2183 			wm_high.yclk =
2184 				radeon_dpm_get_mclk(rdev, false) * 10;
2185 			wm_high.sclk =
2186 				radeon_dpm_get_sclk(rdev, false) * 10;
2187 		} else {
2188 			wm_high.yclk = rdev->pm.current_mclk * 10;
2189 			wm_high.sclk = rdev->pm.current_sclk * 10;
2190 		}
2191 
2192 		wm_high.disp_clk = mode->clock;
2193 		wm_high.src_width = mode->crtc_hdisplay;
2194 		wm_high.active_time = active_time;
2195 		wm_high.blank_time = line_time - wm_high.active_time;
2196 		wm_high.interlaced = false;
2197 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2198 			wm_high.interlaced = true;
2199 		wm_high.vsc = radeon_crtc->vsc;
2200 		wm_high.vtaps = 1;
2201 		if (radeon_crtc->rmx_type != RMX_OFF)
2202 			wm_high.vtaps = 2;
2203 		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
2204 		wm_high.lb_size = lb_size;
2205 		wm_high.dram_channels = dram_channels;
2206 		wm_high.num_heads = num_heads;
2207 
2208 		/* watermark for low clocks */
2209 		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2210 			wm_low.yclk =
2211 				radeon_dpm_get_mclk(rdev, true) * 10;
2212 			wm_low.sclk =
2213 				radeon_dpm_get_sclk(rdev, true) * 10;
2214 		} else {
2215 			wm_low.yclk = rdev->pm.current_mclk * 10;
2216 			wm_low.sclk = rdev->pm.current_sclk * 10;
2217 		}
2218 
2219 		wm_low.disp_clk = mode->clock;
2220 		wm_low.src_width = mode->crtc_hdisplay;
2221 		wm_low.active_time = active_time;
2222 		wm_low.blank_time = line_time - wm_low.active_time;
2223 		wm_low.interlaced = false;
2224 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2225 			wm_low.interlaced = true;
2226 		wm_low.vsc = radeon_crtc->vsc;
2227 		wm_low.vtaps = 1;
2228 		if (radeon_crtc->rmx_type != RMX_OFF)
2229 			wm_low.vtaps = 2;
2230 		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
2231 		wm_low.lb_size = lb_size;
2232 		wm_low.dram_channels = dram_channels;
2233 		wm_low.num_heads = num_heads;
2234 
2235 		/* set for high clocks */
2236 		latency_watermark_a = min(evergreen_latency_watermark(&wm_high), (u32)65535);
2237 		/* set for low clocks */
2238 		latency_watermark_b = min(evergreen_latency_watermark(&wm_low), (u32)65535);
2239 
2240 		/* possibly force display priority to high */
2241 		/* should really do this at mode validation time... */
2242 		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
2243 		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm_high) ||
2244 		    !evergreen_check_latency_hiding(&wm_high) ||
2245 		    (rdev->disp_priority == 2)) {
2246 			DRM_DEBUG_KMS("force priority a to high\n");
2247 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
2248 		}
2249 		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
2250 		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm_low) ||
2251 		    !evergreen_check_latency_hiding(&wm_low) ||
2252 		    (rdev->disp_priority == 2)) {
2253 			DRM_DEBUG_KMS("force priority b to high\n");
2254 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
2255 		}
2256 
2257 		a.full = dfixed_const(1000);
2258 		b.full = dfixed_const(mode->clock);
2259 		b.full = dfixed_div(b, a);
2260 		c.full = dfixed_const(latency_watermark_a);
2261 		c.full = dfixed_mul(c, b);
2262 		c.full = dfixed_mul(c, radeon_crtc->hsc);
2263 		c.full = dfixed_div(c, a);
2264 		a.full = dfixed_const(16);
2265 		c.full = dfixed_div(c, a);
2266 		priority_a_mark = dfixed_trunc(c);
2267 		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
2268 
2269 		a.full = dfixed_const(1000);
2270 		b.full = dfixed_const(mode->clock);
2271 		b.full = dfixed_div(b, a);
2272 		c.full = dfixed_const(latency_watermark_b);
2273 		c.full = dfixed_mul(c, b);
2274 		c.full = dfixed_mul(c, radeon_crtc->hsc);
2275 		c.full = dfixed_div(c, a);
2276 		a.full = dfixed_const(16);
2277 		c.full = dfixed_div(c, a);
2278 		priority_b_mark = dfixed_trunc(c);
2279 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
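		/* The two blocks above convert a latency watermark in ns into
		 * a pixel count: ns * (pixel clock in MHz) / 1000 gives pixels,
		 * scaled by the horizontal scale ratio; the final divide by 16
		 * suggests the PRIORITY_MARK field counts 16-pixel units (an
		 * inference from the math, not a documented fact). E.g. 3822 ns
		 * at 148.5 MHz with hsc = 1.0 yields
		 * 3822 * 148.5 / 1000 / 16 ~= 35.
		 */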
2280 
2281 		/* Save the number of lines by which the line buffer leads the scanout */
2282 		radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
2283 	}
2284 
2285 	/* select wm A */
2286 	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
2287 	tmp = arb_control3;
2288 	tmp &= ~LATENCY_WATERMARK_MASK(3);
2289 	tmp |= LATENCY_WATERMARK_MASK(1);
2290 	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
2291 	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
2292 	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
2293 		LATENCY_HIGH_WATERMARK(line_time)));
2294 	/* select wm B */
2295 	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
2296 	tmp &= ~LATENCY_WATERMARK_MASK(3);
2297 	tmp |= LATENCY_WATERMARK_MASK(2);
2298 	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
2299 	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
2300 	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
2301 		LATENCY_HIGH_WATERMARK(line_time)));
2302 	/* restore original selection */
2303 	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
2304 
2305 	/* write the priority marks */
2306 	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
2307 	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
2308 
2309 	/* save values for DPM */
2310 	radeon_crtc->line_time = line_time;
2311 	radeon_crtc->wm_high = latency_watermark_a;
2312 	radeon_crtc->wm_low = latency_watermark_b;
2313 }
2314 
2315 /**
2316  * evergreen_bandwidth_update - update display watermarks callback.
2317  *
2318  * @rdev: radeon_device pointer
2319  *
2320  * Update the display watermarks based on the requested mode(s)
2321  * (evergreen+).
2322  */
2323 void evergreen_bandwidth_update(struct radeon_device *rdev)
2324 {
2325 	struct drm_display_mode *mode0 = NULL;
2326 	struct drm_display_mode *mode1 = NULL;
2327 	u32 num_heads = 0, lb_size;
2328 	int i;
2329 
2330 	if (!rdev->mode_info.mode_config_initialized)
2331 		return;
2332 
2333 	radeon_update_display_priority(rdev);
2334 
2335 	for (i = 0; i < rdev->num_crtc; i++) {
2336 		if (rdev->mode_info.crtcs[i]->base.enabled)
2337 			num_heads++;
2338 	}
2339 	for (i = 0; i < rdev->num_crtc; i += 2) {
2340 		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
2341 		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
2342 		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
2343 		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
2344 		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
2345 		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
2346 	}
2347 }
2348 
2349 /**
2350  * evergreen_mc_wait_for_idle - wait for MC idle callback.
2351  *
2352  * @rdev: radeon_device pointer
2353  *
2354  * Wait for the MC (memory controller) to be idle.
2355  * (evergreen+).
2356  * Returns 0 if the MC is idle, -1 if not.
2357  */
2358 int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
2359 {
2360 	unsigned i;
2361 	u32 tmp;
2362 
2363 	for (i = 0; i < rdev->usec_timeout; i++) {
2364 		/* read the MC busy bits from SRBM_STATUS */
2365 		tmp = RREG32(SRBM_STATUS) & 0x1F00;
2366 		if (!tmp)
2367 			return 0;
2368 		udelay(1);
2369 	}
2370 	return -1;
2371 }
2372 
2373 /*
2374  * GART
2375  */
2376 void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
2377 {
2378 	unsigned i;
2379 	u32 tmp;
2380 
2381 	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
2382 
2383 	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
2384 	for (i = 0; i < rdev->usec_timeout; i++) {
2385 		/* poll the VM context0 request/response register */
2386 		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
2387 		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
2388 		if (tmp == 2) {
2389 			pr_warn("[drm] r600 flush TLB failed\n");
2390 			return;
2391 		}
2392 		if (tmp)
2393 			return;
2395 		udelay(1);
2396 	}
2397 }
2398 
2399 static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
2400 {
2401 	u32 tmp;
2402 	int r;
2403 
2404 	if (rdev->gart.robj == NULL) {
2405 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
2406 		return -EINVAL;
2407 	}
2408 	r = radeon_gart_table_vram_pin(rdev);
2409 	if (r)
2410 		return r;
2411 	/* Setup L2 cache */
2412 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
2413 				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
2414 				EFFECTIVE_L2_QUEUE_SIZE(7));
2415 	WREG32(VM_L2_CNTL2, 0);
2416 	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
2417 	/* Setup TLB control */
2418 	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
2419 		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
2420 		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
2421 		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
2422 	if (rdev->flags & RADEON_IS_IGP) {
2423 		WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
2424 		WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
2425 		WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
2426 	} else {
2427 		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
2428 		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
2429 		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
2430 		if ((rdev->family == CHIP_JUNIPER) ||
2431 		    (rdev->family == CHIP_CYPRESS) ||
2432 		    (rdev->family == CHIP_HEMLOCK) ||
2433 		    (rdev->family == CHIP_BARTS))
2434 			WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
2435 	}
2436 	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
2437 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
2438 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
2439 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
2440 	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
2441 	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
2442 	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
2443 	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
2444 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
2445 	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
2446 			(u32)(rdev->dummy_page.addr >> 12));
2447 	WREG32(VM_CONTEXT1_CNTL, 0);
2448 
2449 	evergreen_pcie_gart_tlb_flush(rdev);
2450 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
2451 		 (unsigned)(rdev->mc.gtt_size >> 20),
2452 		 (unsigned long long)rdev->gart.table_addr);
2453 	rdev->gart.ready = true;
2454 	return 0;
2455 }
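
/* A note on the shifts above: radeon's GPU VM uses 4 KiB pages
 * (RADEON_GPU_PAGE_SIZE), so the page table START/END/BASE registers and
 * the protection-fault default address are programmed as page frame
 * numbers (byte address >> 12) rather than byte addresses.
 */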
2456 
2457 static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
2458 {
2459 	u32 tmp;
2460 
2461 	/* Disable all tables */
2462 	WREG32(VM_CONTEXT0_CNTL, 0);
2463 	WREG32(VM_CONTEXT1_CNTL, 0);
2464 
2465 	/* Setup L2 cache */
2466 	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
2467 				EFFECTIVE_L2_QUEUE_SIZE(7));
2468 	WREG32(VM_L2_CNTL2, 0);
2469 	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
2470 	/* Setup TLB control */
2471 	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
2472 	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
2473 	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
2474 	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
2475 	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
2476 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
2477 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
2478 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
2479 	radeon_gart_table_vram_unpin(rdev);
2480 }
2481 
2482 static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
2483 {
2484 	evergreen_pcie_gart_disable(rdev);
2485 	radeon_gart_table_vram_free(rdev);
2486 	radeon_gart_fini(rdev);
2487 }
2488 
2489 
2490 static void evergreen_agp_enable(struct radeon_device *rdev)
2491 {
2492 	u32 tmp;
2493 
2494 	/* Setup L2 cache */
2495 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
2496 				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
2497 				EFFECTIVE_L2_QUEUE_SIZE(7));
2498 	WREG32(VM_L2_CNTL2, 0);
2499 	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
2500 	/* Setup TLB control */
2501 	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
2502 		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
2503 		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
2504 		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
2505 	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
2506 	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
2507 	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
2508 	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
2509 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
2510 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
2511 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
2512 	WREG32(VM_CONTEXT0_CNTL, 0);
2513 	WREG32(VM_CONTEXT1_CNTL, 0);
2514 }
2515 
2516 static const unsigned ni_dig_offsets[] =
2517 {
2518 	NI_DIG0_REGISTER_OFFSET,
2519 	NI_DIG1_REGISTER_OFFSET,
2520 	NI_DIG2_REGISTER_OFFSET,
2521 	NI_DIG3_REGISTER_OFFSET,
2522 	NI_DIG4_REGISTER_OFFSET,
2523 	NI_DIG5_REGISTER_OFFSET
2524 };
2525 
2526 static const unsigned ni_tx_offsets[] =
2527 {
2528 	NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
2529 	NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
2530 	NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
2531 	NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
2532 	NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
2533 	NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
2534 };
2535 
2536 static const unsigned evergreen_dp_offsets[] =
2537 {
2538 	EVERGREEN_DP0_REGISTER_OFFSET,
2539 	EVERGREEN_DP1_REGISTER_OFFSET,
2540 	EVERGREEN_DP2_REGISTER_OFFSET,
2541 	EVERGREEN_DP3_REGISTER_OFFSET,
2542 	EVERGREEN_DP4_REGISTER_OFFSET,
2543 	EVERGREEN_DP5_REGISTER_OFFSET
2544 };
2545 
2546 static const unsigned evergreen_disp_int_status[] =
2547 {
2548 	DISP_INTERRUPT_STATUS,
2549 	DISP_INTERRUPT_STATUS_CONTINUE,
2550 	DISP_INTERRUPT_STATUS_CONTINUE2,
2551 	DISP_INTERRUPT_STATUS_CONTINUE3,
2552 	DISP_INTERRUPT_STATUS_CONTINUE4,
2553 	DISP_INTERRUPT_STATUS_CONTINUE5
2554 };
2555 
2556 /*
2557  * Assumption is that EVERGREEN_CRTC_MASTER_EN is enabled for the requested
2558  * crtc. We go from crtc to connector, which is not reliable since it
2559  * should really be the opposite direction. If the crtc is enabled, find
2560  * the dig_fe which selects this crtc and ensure that it is enabled. If
2561  * such a dig_fe is found, find the dig_be which selects that dig_fe and
2562  * ensure that it is enabled and in DP_SST mode.
2563  * If UNIPHY_PLL_CONTROL1 is enabled then we should disconnect timing
2564  * from the dp symbol clocks.
2565  */
2566 static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
2567 					       unsigned crtc_id, unsigned *ret_dig_fe)
2568 {
2569 	unsigned i;
2570 	unsigned dig_fe;
2571 	unsigned dig_be;
2572 	unsigned dig_en_be;
2573 	unsigned uniphy_pll;
2574 	unsigned digs_fe_selected;
2575 	unsigned dig_be_mode;
2576 	unsigned dig_fe_mask;
2577 	bool is_enabled = false;
2578 	bool found_crtc = false;
2579 
2580 	/* loop through all running dig_fe to find selected crtc */
2581 	for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2582 		dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
2583 		if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
2584 		    crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
2585 			/* found running pipe */
2586 			found_crtc = true;
2587 			dig_fe_mask = 1 << i;
2588 			dig_fe = i;
2589 			break;
2590 		}
2591 	}
2592 
2593 	if (found_crtc) {
2594 		/* loop through all running dig_be to find selected dig_fe */
2595 		for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2596 			dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
2597 			/* is this dig_fe selected by this dig_be? */
2598 			digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
2599 			dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
2600 			if (dig_fe_mask & digs_fe_selected &&
2601 			    /* if dig_be in sst mode? */
2602 			    dig_be_mode == NI_DIG_BE_DPSST) {
2603 				dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
2604 						   ni_dig_offsets[i]);
2605 				uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
2606 						    ni_tx_offsets[i]);
2607 				/* dig_be enabled and tx is running */
2608 				if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
2609 				    dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
2610 				    uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
2611 					is_enabled = true;
2612 					*ret_dig_fe = dig_fe;
2613 					break;
2614 				}
2615 			}
2616 		}
2617 	}
2618 
2619 	return is_enabled;
2620 }
2621 
2622 /*
2623  * Blank the dig when in dp sst mode; in that mode the dig ignores
2624  * crtc timing.
2625  */
2626 static void evergreen_blank_dp_output(struct radeon_device *rdev,
2627 				      unsigned dig_fe)
2628 {
2629 	unsigned stream_ctrl;
2630 	unsigned fifo_ctrl;
2631 	unsigned counter = 0;
2632 
2633 	if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
2634 		DRM_ERROR("invalid dig_fe %d\n", dig_fe);
2635 		return;
2636 	}
2637 
2638 	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2639 			     evergreen_dp_offsets[dig_fe]);
2640 	if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
2641 		DRM_ERROR("dig %d , should be enable\n", dig_fe);
2642 		return;
2643 	}
2644 
2645 	stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
2646 	WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2647 	       evergreen_dp_offsets[dig_fe], stream_ctrl);
2648 
2649 	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2650 			     evergreen_dp_offsets[dig_fe]);
2651 	while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
2652 		msleep(1);
2653 		counter++;
2654 		stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2655 				     evergreen_dp_offsets[dig_fe]);
2656 	}
2657 	if (counter >= 32)
2658 		DRM_ERROR("timed out waiting for the DP stream to stop (%d)\n", counter);
2659 
2660 	fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
2661 	fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
2662 	WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
2664 }
2665 
2666 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
2667 {
2668 	u32 crtc_enabled, tmp, frame_count, blackout;
2669 	int i, j;
2670 	unsigned dig_fe;
2671 
2672 	if (!ASIC_IS_NODCE(rdev)) {
2673 		save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
2674 		save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
2675 
2676 		/* disable VGA render */
2677 		WREG32(VGA_RENDER_CONTROL, 0);
2678 	}
2679 	/* blank the display controllers */
2680 	for (i = 0; i < rdev->num_crtc; i++) {
2681 		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
2682 		if (crtc_enabled) {
2683 			save->crtc_enabled[i] = true;
2684 			if (ASIC_IS_DCE6(rdev)) {
2685 				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
2686 				if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
2687 					radeon_wait_for_vblank(rdev, i);
2688 					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2689 					tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
2690 					WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
2691 					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
2692 				}
2693 			} else {
2694 				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
2695 				if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
2696 					radeon_wait_for_vblank(rdev, i);
2697 					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2698 					tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
2699 					WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
2700 					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
2701 				}
2702 			}
2703 			/* wait for the next frame */
2704 			frame_count = radeon_get_vblank_counter(rdev, i);
2705 			for (j = 0; j < rdev->usec_timeout; j++) {
2706 				if (radeon_get_vblank_counter(rdev, i) != frame_count)
2707 					break;
2708 				udelay(1);
2709 			}
2710 			/* we should disable the dig if it drives dp sst,
2711 			 * but we are in radeon_device_init and the topology is
2712 			 * unknown; it only becomes available after radeon_modeset_init.
2713 			 * radeon_atom_encoder_dpms_dig would do the job if we
2714 			 * initialized it properly; for now we do it manually.
2715 			 */
2717 			if (ASIC_IS_DCE5(rdev) &&
2718 			    evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
2719 				evergreen_blank_dp_output(rdev, dig_fe);
2720 			/* we could remove the 6 lines below */
2721 			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
2722 			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2723 			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
2724 			tmp &= ~EVERGREEN_CRTC_MASTER_EN;
2725 			WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
2726 			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
2727 			save->crtc_enabled[i] = false;
2728 			/* ***** */
2729 		} else {
2730 			save->crtc_enabled[i] = false;
2731 		}
2732 	}
2733 
2734 	radeon_mc_wait_for_idle(rdev);
2735 
2736 	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
2737 	if ((blackout & BLACKOUT_MODE_MASK) != 1) {
2738 		/* Block CPU access */
2739 		WREG32(BIF_FB_EN, 0);
2740 		/* blackout the MC */
2741 		blackout &= ~BLACKOUT_MODE_MASK;
2742 		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
2743 	}
2744 	/* wait for the MC to settle */
2745 	udelay(100);
2746 
2747 	/* lock double buffered regs */
2748 	for (i = 0; i < rdev->num_crtc; i++) {
2749 		if (save->crtc_enabled[i]) {
2750 			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
2751 			if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
2752 				tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
2753 				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
2754 			}
2755 			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
2756 			if (!(tmp & 1)) {
2757 				tmp |= 1;
2758 				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
2759 			}
2760 		}
2761 	}
2762 }
2763 
2764 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
2765 {
2766 	u32 tmp, frame_count;
2767 	int i, j;
2768 
2769 	/* update crtc base addresses */
2770 	for (i = 0; i < rdev->num_crtc; i++) {
2771 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
2772 		       upper_32_bits(rdev->mc.vram_start));
2773 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
2774 		       upper_32_bits(rdev->mc.vram_start));
2775 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
2776 		       (u32)rdev->mc.vram_start);
2777 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
2778 		       (u32)rdev->mc.vram_start);
2779 	}
2780 
2781 	if (!ASIC_IS_NODCE(rdev)) {
2782 		WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
2783 		WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
2784 	}
2785 
2786 	/* unlock regs and wait for update */
2787 	for (i = 0; i < rdev->num_crtc; i++) {
2788 		if (save->crtc_enabled[i]) {
2789 			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
2790 			if ((tmp & 0x7) != 0) {
2791 				tmp &= ~0x7;
2792 				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
2793 			}
2794 			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
2795 			if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
2796 				tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
2797 				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
2798 			}
2799 			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
2800 			if (tmp & 1) {
2801 				tmp &= ~1;
2802 				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
2803 			}
2804 			for (j = 0; j < rdev->usec_timeout; j++) {
2805 				tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
2806 				if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
2807 					break;
2808 				udelay(1);
2809 			}
2810 		}
2811 	}
2812 
2813 	/* unblackout the MC */
2814 	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
2815 	tmp &= ~BLACKOUT_MODE_MASK;
2816 	WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
2817 	/* allow CPU access */
2818 	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
2819 
2820 	for (i = 0; i < rdev->num_crtc; i++) {
2821 		if (save->crtc_enabled[i]) {
2822 			if (ASIC_IS_DCE6(rdev)) {
2823 				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
2824 				tmp &= ~EVERGREEN_CRTC_BLANK_DATA_EN;
2825 				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2826 				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
2827 				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
2828 			} else {
2829 				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
2830 				tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
2831 				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2832 				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
2833 				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
2834 			}
2835 			/* wait for the next frame */
2836 			frame_count = radeon_get_vblank_counter(rdev, i);
2837 			for (j = 0; j < rdev->usec_timeout; j++) {
2838 				if (radeon_get_vblank_counter(rdev, i) != frame_count)
2839 					break;
2840 				udelay(1);
2841 			}
2842 		}
2843 	}
2844 	if (!ASIC_IS_NODCE(rdev)) {
2845 		/* Unlock vga access */
2846 		WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
2847 		mdelay(1);
2848 		WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
2849 	}
2850 }
2851 
2852 void evergreen_mc_program(struct radeon_device *rdev)
2853 {
2854 	struct evergreen_mc_save save;
2855 	u32 tmp;
2856 	int i, j;
2857 
2858 	/* Initialize HDP */
2859 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
2860 		WREG32((0x2c14 + j), 0x00000000);
2861 		WREG32((0x2c18 + j), 0x00000000);
2862 		WREG32((0x2c1c + j), 0x00000000);
2863 		WREG32((0x2c20 + j), 0x00000000);
2864 		WREG32((0x2c24 + j), 0x00000000);
2865 	}
2866 	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
2867 
2868 	evergreen_mc_stop(rdev, &save);
2869 	if (evergreen_mc_wait_for_idle(rdev)) {
2870 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
2871 	}
2872 	/* Lock out access through the VGA aperture */
2873 	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
2874 	/* Update configuration */
2875 	if (rdev->flags & RADEON_IS_AGP) {
2876 		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
2877 			/* VRAM before AGP */
2878 			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2879 				rdev->mc.vram_start >> 12);
2880 			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2881 				rdev->mc.gtt_end >> 12);
2882 		} else {
2883 			/* VRAM after AGP */
2884 			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2885 				rdev->mc.gtt_start >> 12);
2886 			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2887 				rdev->mc.vram_end >> 12);
2888 		}
2889 	} else {
2890 		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2891 			rdev->mc.vram_start >> 12);
2892 		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2893 			rdev->mc.vram_end >> 12);
2894 	}
2895 	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
2896 	/* llano/ontario only */
2897 	if ((rdev->family == CHIP_PALM) ||
2898 	    (rdev->family == CHIP_SUMO) ||
2899 	    (rdev->family == CHIP_SUMO2)) {
2900 		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
2901 		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
2902 		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
2903 		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
2904 	}
2905 	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
2906 	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
2907 	WREG32(MC_VM_FB_LOCATION, tmp);
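	/* Example of the packing above with illustrative values: the FB
	 * window is described in 16 MiB units (address >> 24), start in the
	 * low 16 bits and end in the high 16 bits. For 1 GiB of VRAM at
	 * offset 0, vram_start >> 24 = 0x0 and vram_end >> 24 = 0x3F, so
	 * tmp = 0x003F0000.
	 */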
2908 	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
2909 	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
2910 	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
2911 	if (rdev->flags & RADEON_IS_AGP) {
2912 		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
2913 		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
2914 		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
2915 	} else {
2916 		WREG32(MC_VM_AGP_BASE, 0);
2917 		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
2918 		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
2919 	}
2920 	if (evergreen_mc_wait_for_idle(rdev)) {
2921 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
2922 	}
2923 	evergreen_mc_resume(rdev, &save);
2924 	/* we need to own VRAM, so turn off the VGA renderer here
2925 	 * to stop it overwriting our objects */
2926 	rv515_vga_render_disable(rdev);
2927 }
2928 
2929 /*
2930  * CP.
2931  */
2932 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2933 {
2934 	struct radeon_ring *ring = &rdev->ring[ib->ring];
2935 	u32 next_rptr;
2936 
2937 	/* set to DX10/11 mode */
2938 	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
2939 	radeon_ring_write(ring, 1);
2940 
2941 	if (ring->rptr_save_reg) {
2942 		next_rptr = ring->wptr + 3 + 4;
2943 		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2944 		radeon_ring_write(ring, ((ring->rptr_save_reg -
2945 					  PACKET3_SET_CONFIG_REG_START) >> 2));
2946 		radeon_ring_write(ring, next_rptr);
2947 	} else if (rdev->wb.enabled) {
2948 		next_rptr = ring->wptr + 5 + 4;
2949 		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
2950 		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
2951 		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
2952 		radeon_ring_write(ring, next_rptr);
2953 		radeon_ring_write(ring, 0);
2954 	}
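	/* In both branches above next_rptr points just past the
	 * INDIRECT_BUFFER packet emitted below: the SET_CONFIG_REG write is
	 * 3 dwords and the MEM_WRITE packet is 5 dwords, plus 4 dwords for
	 * the IB packet itself (header + 3 payload dwords).
	 */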
2955 
2956 	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2957 	radeon_ring_write(ring,
2958 #ifdef __BIG_ENDIAN
2959 			  (2 << 0) |
2960 #endif
2961 			  (ib->gpu_addr & 0xFFFFFFFC));
2962 	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
2963 	radeon_ring_write(ring, ib->length_dw);
2964 }
2965 
2966 
2967 static int evergreen_cp_load_microcode(struct radeon_device *rdev)
2968 {
2969 	const __be32 *fw_data;
2970 	int i;
2971 
2972 	if (!rdev->me_fw || !rdev->pfp_fw)
2973 		return -EINVAL;
2974 
2975 	r700_cp_stop(rdev);
2976 	WREG32(CP_RB_CNTL,
2977 #ifdef __BIG_ENDIAN
2978 	       BUF_SWAP_32BIT |
2979 #endif
2980 	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2981 
2982 	fw_data = (const __be32 *)rdev->pfp_fw->data;
2983 	WREG32(CP_PFP_UCODE_ADDR, 0);
2984 	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
2985 		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
2986 	WREG32(CP_PFP_UCODE_ADDR, 0);
2987 
2988 	fw_data = (const __be32 *)rdev->me_fw->data;
2989 	WREG32(CP_ME_RAM_WADDR, 0);
2990 	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
2991 		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
2992 
2993 	WREG32(CP_PFP_UCODE_ADDR, 0);
2994 	WREG32(CP_ME_RAM_WADDR, 0);
2995 	WREG32(CP_ME_RAM_RADDR, 0);
2996 	return 0;
2997 }
2998 
2999 static int evergreen_cp_start(struct radeon_device *rdev)
3000 {
3001 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3002 	int r, i;
3003 	uint32_t cp_me;
3004 
3005 	r = radeon_ring_lock(rdev, ring, 7);
3006 	if (r) {
3007 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3008 		return r;
3009 	}
3010 	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
3011 	radeon_ring_write(ring, 0x1);
3012 	radeon_ring_write(ring, 0x0);
3013 	radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
3014 	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
3015 	radeon_ring_write(ring, 0);
3016 	radeon_ring_write(ring, 0);
3017 	radeon_ring_unlock_commit(rdev, ring, false);
3018 
3019 	cp_me = 0xff;
3020 	WREG32(CP_ME_CNTL, cp_me);
3021 
3022 	r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
3023 	if (r) {
3024 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3025 		return r;
3026 	}
3027 
3028 	/* setup clear context state */
3029 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3030 	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3031 
3032 	for (i = 0; i < evergreen_default_size; i++)
3033 		radeon_ring_write(ring, evergreen_default_state[i]);
3034 
3035 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3036 	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3037 
3038 	/* set clear context state */
3039 	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3040 	radeon_ring_write(ring, 0);
3041 
3042 	/* SQ_VTX_BASE_VTX_LOC */
3043 	radeon_ring_write(ring, 0xc0026f00);
3044 	radeon_ring_write(ring, 0x00000000);
3045 	radeon_ring_write(ring, 0x00000000);
3046 	radeon_ring_write(ring, 0x00000000);
3047 
3048 	/* Clear consts */
3049 	radeon_ring_write(ring, 0xc0036f00);
3050 	radeon_ring_write(ring, 0x00000bc4);
3051 	radeon_ring_write(ring, 0xffffffff);
3052 	radeon_ring_write(ring, 0xffffffff);
3053 	radeon_ring_write(ring, 0xffffffff);
3054 
3055 	radeon_ring_write(ring, 0xc0026900);
3056 	radeon_ring_write(ring, 0x00000316);
3057 	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
3058 	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
3059 
3060 	radeon_ring_unlock_commit(rdev, ring, false);
3061 
3062 	return 0;
3063 }
3064 
3065 static int evergreen_cp_resume(struct radeon_device *rdev)
3066 {
3067 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3068 	u32 tmp;
3069 	u32 rb_bufsz;
3070 	int r;
3071 
3072 	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
3073 	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
3074 				 SOFT_RESET_PA |
3075 				 SOFT_RESET_SH |
3076 				 SOFT_RESET_VGT |
3077 				 SOFT_RESET_SPI |
3078 				 SOFT_RESET_SX));
3079 	RREG32(GRBM_SOFT_RESET);
3080 	mdelay(15);
3081 	WREG32(GRBM_SOFT_RESET, 0);
3082 	RREG32(GRBM_SOFT_RESET);
3083 
3084 	/* Set ring buffer size */
3085 	rb_bufsz = order_base_2(ring->ring_size / 8);
3086 	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
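	/* Sizing note: ring_size is in bytes, so ring_size / 8 counts 8-byte
	 * (qword) units and rb_bufsz is its log2; a 1 MiB ring gives
	 * order_base_2(131072) = 17. The term shifted in above it is
	 * likewise log2(RADEON_GPU_PAGE_SIZE / 8) = log2(512) = 9, assuming
	 * the usual 4 KiB GPU page size.
	 */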
3087 #ifdef __BIG_ENDIAN
3088 	tmp |= BUF_SWAP_32BIT;
3089 #endif
3090 	WREG32(CP_RB_CNTL, tmp);
3091 	WREG32(CP_SEM_WAIT_TIMER, 0x0);
3092 	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
3093 
3094 	/* Set the write pointer delay */
3095 	WREG32(CP_RB_WPTR_DELAY, 0);
3096 
3097 	/* Initialize the ring buffer's read and write pointers */
3098 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
3099 	WREG32(CP_RB_RPTR_WR, 0);
3100 	ring->wptr = 0;
3101 	WREG32(CP_RB_WPTR, ring->wptr);
3102 
3103 	/* set the wb address whether it's enabled or not */
3104 	WREG32(CP_RB_RPTR_ADDR,
3105 	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
3106 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
3107 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
3108 
3109 	if (rdev->wb.enabled)
3110 		WREG32(SCRATCH_UMSK, 0xff);
3111 	else {
3112 		tmp |= RB_NO_UPDATE;
3113 		WREG32(SCRATCH_UMSK, 0);
3114 	}
3115 
3116 	mdelay(1);
3117 	WREG32(CP_RB_CNTL, tmp);
3118 
3119 	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
3120 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
3121 
3122 	evergreen_cp_start(rdev);
3123 	ring->ready = true;
3124 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
3125 	if (r) {
3126 		ring->ready = false;
3127 		return r;
3128 	}
3129 	return 0;
3130 }
3131 
3132 /*
3133  * Core functions
3134  */
3135 static void evergreen_gpu_init(struct radeon_device *rdev)
3136 {
3137 	u32 gb_addr_config;
3138 	u32 mc_arb_ramcfg;
3139 	u32 sx_debug_1;
3140 	u32 smx_dc_ctl0;
3141 	u32 sq_config;
3142 	u32 sq_lds_resource_mgmt;
3143 	u32 sq_gpr_resource_mgmt_1;
3144 	u32 sq_gpr_resource_mgmt_2;
3145 	u32 sq_gpr_resource_mgmt_3;
3146 	u32 sq_thread_resource_mgmt;
3147 	u32 sq_thread_resource_mgmt_2;
3148 	u32 sq_stack_resource_mgmt_1;
3149 	u32 sq_stack_resource_mgmt_2;
3150 	u32 sq_stack_resource_mgmt_3;
3151 	u32 vgt_cache_invalidation;
3152 	u32 hdp_host_path_cntl, tmp;
3153 	u32 disabled_rb_mask;
3154 	int i, j, ps_thread_count;
3155 
3156 	switch (rdev->family) {
3157 	case CHIP_CYPRESS:
3158 	case CHIP_HEMLOCK:
3159 		rdev->config.evergreen.num_ses = 2;
3160 		rdev->config.evergreen.max_pipes = 4;
3161 		rdev->config.evergreen.max_tile_pipes = 8;
3162 		rdev->config.evergreen.max_simds = 10;
3163 		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
3164 		rdev->config.evergreen.max_gprs = 256;
3165 		rdev->config.evergreen.max_threads = 248;
3166 		rdev->config.evergreen.max_gs_threads = 32;
3167 		rdev->config.evergreen.max_stack_entries = 512;
3168 		rdev->config.evergreen.sx_num_of_sets = 4;
3169 		rdev->config.evergreen.sx_max_export_size = 256;
3170 		rdev->config.evergreen.sx_max_export_pos_size = 64;
3171 		rdev->config.evergreen.sx_max_export_smx_size = 192;
3172 		rdev->config.evergreen.max_hw_contexts = 8;
3173 		rdev->config.evergreen.sq_num_cf_insts = 2;
3174 
3175 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
3176 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
3177 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
3178 		gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
3179 		break;
3180 	case CHIP_JUNIPER:
3181 		rdev->config.evergreen.num_ses = 1;
3182 		rdev->config.evergreen.max_pipes = 4;
3183 		rdev->config.evergreen.max_tile_pipes = 4;
3184 		rdev->config.evergreen.max_simds = 10;
3185 		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
3186 		rdev->config.evergreen.max_gprs = 256;
3187 		rdev->config.evergreen.max_threads = 248;
3188 		rdev->config.evergreen.max_gs_threads = 32;
3189 		rdev->config.evergreen.max_stack_entries = 512;
3190 		rdev->config.evergreen.sx_num_of_sets = 4;
3191 		rdev->config.evergreen.sx_max_export_size = 256;
3192 		rdev->config.evergreen.sx_max_export_pos_size = 64;
3193 		rdev->config.evergreen.sx_max_export_smx_size = 192;
3194 		rdev->config.evergreen.max_hw_contexts = 8;
3195 		rdev->config.evergreen.sq_num_cf_insts = 2;
3196 
3197 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
3198 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
3199 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
3200 		gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
3201 		break;
3202 	case CHIP_REDWOOD:
3203 		rdev->config.evergreen.num_ses = 1;
3204 		rdev->config.evergreen.max_pipes = 4;
3205 		rdev->config.evergreen.max_tile_pipes = 4;
3206 		rdev->config.evergreen.max_simds = 5;
3207 		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
3208 		rdev->config.evergreen.max_gprs = 256;
3209 		rdev->config.evergreen.max_threads = 248;
3210 		rdev->config.evergreen.max_gs_threads = 32;
3211 		rdev->config.evergreen.max_stack_entries = 256;
3212 		rdev->config.evergreen.sx_num_of_sets = 4;
3213 		rdev->config.evergreen.sx_max_export_size = 256;
3214 		rdev->config.evergreen.sx_max_export_pos_size = 64;
3215 		rdev->config.evergreen.sx_max_export_smx_size = 192;
3216 		rdev->config.evergreen.max_hw_contexts = 8;
3217 		rdev->config.evergreen.sq_num_cf_insts = 2;
3218 
3219 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
3220 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
3221 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
3222 		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
3223 		break;
3224 	case CHIP_CEDAR:
3225 	default:
3226 		rdev->config.evergreen.num_ses = 1;
3227 		rdev->config.evergreen.max_pipes = 2;
3228 		rdev->config.evergreen.max_tile_pipes = 2;
3229 		rdev->config.evergreen.max_simds = 2;
3230 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
3231 		rdev->config.evergreen.max_gprs = 256;
3232 		rdev->config.evergreen.max_threads = 192;
3233 		rdev->config.evergreen.max_gs_threads = 16;
3234 		rdev->config.evergreen.max_stack_entries = 256;
3235 		rdev->config.evergreen.sx_num_of_sets = 4;
3236 		rdev->config.evergreen.sx_max_export_size = 128;
3237 		rdev->config.evergreen.sx_max_export_pos_size = 32;
3238 		rdev->config.evergreen.sx_max_export_smx_size = 96;
3239 		rdev->config.evergreen.max_hw_contexts = 4;
3240 		rdev->config.evergreen.sq_num_cf_insts = 1;
3241 
3242 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
3243 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
3244 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
3245 		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
3246 		break;
3247 	case CHIP_PALM:
3248 		rdev->config.evergreen.num_ses = 1;
3249 		rdev->config.evergreen.max_pipes = 2;
3250 		rdev->config.evergreen.max_tile_pipes = 2;
3251 		rdev->config.evergreen.max_simds = 2;
3252 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
3253 		rdev->config.evergreen.max_gprs = 256;
3254 		rdev->config.evergreen.max_threads = 192;
3255 		rdev->config.evergreen.max_gs_threads = 16;
3256 		rdev->config.evergreen.max_stack_entries = 256;
3257 		rdev->config.evergreen.sx_num_of_sets = 4;
3258 		rdev->config.evergreen.sx_max_export_size = 128;
3259 		rdev->config.evergreen.sx_max_export_pos_size = 32;
3260 		rdev->config.evergreen.sx_max_export_smx_size = 96;
3261 		rdev->config.evergreen.max_hw_contexts = 4;
3262 		rdev->config.evergreen.sq_num_cf_insts = 1;
3263 
3264 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
3265 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
3266 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
3267 		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
3268 		break;
3269 	case CHIP_SUMO:
3270 		rdev->config.evergreen.num_ses = 1;
3271 		rdev->config.evergreen.max_pipes = 4;
3272 		rdev->config.evergreen.max_tile_pipes = 4;
3273 		if (rdev->pdev->device == 0x9648)
3274 			rdev->config.evergreen.max_simds = 3;
3275 		else if ((rdev->pdev->device == 0x9647) ||
3276 			 (rdev->pdev->device == 0x964a))
3277 			rdev->config.evergreen.max_simds = 4;
3278 		else
3279 			rdev->config.evergreen.max_simds = 5;
3280 		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
3281 		rdev->config.evergreen.max_gprs = 256;
3282 		rdev->config.evergreen.max_threads = 248;
3283 		rdev->config.evergreen.max_gs_threads = 32;
3284 		rdev->config.evergreen.max_stack_entries = 256;
3285 		rdev->config.evergreen.sx_num_of_sets = 4;
3286 		rdev->config.evergreen.sx_max_export_size = 256;
3287 		rdev->config.evergreen.sx_max_export_pos_size = 64;
3288 		rdev->config.evergreen.sx_max_export_smx_size = 192;
3289 		rdev->config.evergreen.max_hw_contexts = 8;
3290 		rdev->config.evergreen.sq_num_cf_insts = 2;
3291 
3292 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
3293 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
3294 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
3295 		gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
3296 		break;
3297 	case CHIP_SUMO2:
3298 		rdev->config.evergreen.num_ses = 1;
3299 		rdev->config.evergreen.max_pipes = 4;
3300 		rdev->config.evergreen.max_tile_pipes = 4;
3301 		rdev->config.evergreen.max_simds = 2;
3302 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
3303 		rdev->config.evergreen.max_gprs = 256;
3304 		rdev->config.evergreen.max_threads = 248;
3305 		rdev->config.evergreen.max_gs_threads = 32;
3306 		rdev->config.evergreen.max_stack_entries = 512;
3307 		rdev->config.evergreen.sx_num_of_sets = 4;
3308 		rdev->config.evergreen.sx_max_export_size = 256;
3309 		rdev->config.evergreen.sx_max_export_pos_size = 64;
3310 		rdev->config.evergreen.sx_max_export_smx_size = 192;
3311 		rdev->config.evergreen.max_hw_contexts = 4;
3312 		rdev->config.evergreen.sq_num_cf_insts = 2;
3313 
3314 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
3315 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
3316 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
3317 		gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
3318 		break;
3319 	case CHIP_BARTS:
3320 		rdev->config.evergreen.num_ses = 2;
3321 		rdev->config.evergreen.max_pipes = 4;
3322 		rdev->config.evergreen.max_tile_pipes = 8;
3323 		rdev->config.evergreen.max_simds = 7;
3324 		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
3325 		rdev->config.evergreen.max_gprs = 256;
3326 		rdev->config.evergreen.max_threads = 248;
3327 		rdev->config.evergreen.max_gs_threads = 32;
3328 		rdev->config.evergreen.max_stack_entries = 512;
3329 		rdev->config.evergreen.sx_num_of_sets = 4;
3330 		rdev->config.evergreen.sx_max_export_size = 256;
3331 		rdev->config.evergreen.sx_max_export_pos_size = 64;
3332 		rdev->config.evergreen.sx_max_export_smx_size = 192;
3333 		rdev->config.evergreen.max_hw_contexts = 8;
3334 		rdev->config.evergreen.sq_num_cf_insts = 2;
3335 
3336 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
3337 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
3338 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
3339 		gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
3340 		break;
3341 	case CHIP_TURKS:
3342 		rdev->config.evergreen.num_ses = 1;
3343 		rdev->config.evergreen.max_pipes = 4;
3344 		rdev->config.evergreen.max_tile_pipes = 4;
3345 		rdev->config.evergreen.max_simds = 6;
3346 		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
3347 		rdev->config.evergreen.max_gprs = 256;
3348 		rdev->config.evergreen.max_threads = 248;
3349 		rdev->config.evergreen.max_gs_threads = 32;
3350 		rdev->config.evergreen.max_stack_entries = 256;
3351 		rdev->config.evergreen.sx_num_of_sets = 4;
3352 		rdev->config.evergreen.sx_max_export_size = 256;
3353 		rdev->config.evergreen.sx_max_export_pos_size = 64;
3354 		rdev->config.evergreen.sx_max_export_smx_size = 192;
3355 		rdev->config.evergreen.max_hw_contexts = 8;
3356 		rdev->config.evergreen.sq_num_cf_insts = 2;
3357 
3358 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
3359 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
3360 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
3361 		gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
3362 		break;
3363 	case CHIP_CAICOS:
3364 		rdev->config.evergreen.num_ses = 1;
3365 		rdev->config.evergreen.max_pipes = 2;
3366 		rdev->config.evergreen.max_tile_pipes = 2;
3367 		rdev->config.evergreen.max_simds = 2;
3368 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
3369 		rdev->config.evergreen.max_gprs = 256;
3370 		rdev->config.evergreen.max_threads = 192;
3371 		rdev->config.evergreen.max_gs_threads = 16;
3372 		rdev->config.evergreen.max_stack_entries = 256;
3373 		rdev->config.evergreen.sx_num_of_sets = 4;
3374 		rdev->config.evergreen.sx_max_export_size = 128;
3375 		rdev->config.evergreen.sx_max_export_pos_size = 32;
3376 		rdev->config.evergreen.sx_max_export_smx_size = 96;
3377 		rdev->config.evergreen.max_hw_contexts = 4;
3378 		rdev->config.evergreen.sq_num_cf_insts = 1;
3379 
3380 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
3381 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
3382 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
3383 		gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
3384 		break;
3385 	}
3386 
3387 	/* Initialize HDP */
3388 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
3389 		WREG32((0x2c14 + j), 0x00000000);
3390 		WREG32((0x2c18 + j), 0x00000000);
3391 		WREG32((0x2c1c + j), 0x00000000);
3392 		WREG32((0x2c20 + j), 0x00000000);
3393 		WREG32((0x2c24 + j), 0x00000000);
3394 	}
3395 
3396 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
3397 	WREG32(SRBM_INT_CNTL, 0x1);
3398 	WREG32(SRBM_INT_ACK, 0x1);
3399 
3400 	evergreen_fix_pci_max_read_req_size(rdev);
3401 
3402 	RREG32(MC_SHARED_CHMAP);
3403 	if ((rdev->family == CHIP_PALM) ||
3404 	    (rdev->family == CHIP_SUMO) ||
3405 	    (rdev->family == CHIP_SUMO2))
3406 		mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
3407 	else
3408 		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
3409 
3410 	/* set up the tiling info dword.  gb_addr_config is not adequate since it does
3411 	 * not have bank info, so create a custom tiling dword.
3412 	 * bits 3:0   num_pipes
3413 	 * bits 7:4   num_banks
3414 	 * bits 11:8  group_size
3415 	 * bits 15:12 row_size
3416 	 */
3417 	rdev->config.evergreen.tile_config = 0;
3418 	switch (rdev->config.evergreen.max_tile_pipes) {
3419 	case 1:
3420 	default:
3421 		rdev->config.evergreen.tile_config |= (0 << 0);
3422 		break;
3423 	case 2:
3424 		rdev->config.evergreen.tile_config |= (1 << 0);
3425 		break;
3426 	case 4:
3427 		rdev->config.evergreen.tile_config |= (2 << 0);
3428 		break;
3429 	case 8:
3430 		rdev->config.evergreen.tile_config |= (3 << 0);
3431 		break;
3432 	}
3433 	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
3434 	if (rdev->flags & RADEON_IS_IGP)
3435 		rdev->config.evergreen.tile_config |= 1 << 4;
3436 	else {
3437 		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
3438 		case 0: /* four banks */
3439 			rdev->config.evergreen.tile_config |= 0 << 4;
3440 			break;
3441 		case 1: /* eight banks */
3442 			rdev->config.evergreen.tile_config |= 1 << 4;
3443 			break;
3444 		case 2: /* sixteen banks */
3445 		default:
3446 			rdev->config.evergreen.tile_config |= 2 << 4;
3447 			break;
3448 		}
3449 	}
3450 	rdev->config.evergreen.tile_config |= 0 << 8;
3451 	rdev->config.evergreen.tile_config |=
3452 		((gb_addr_config & 0x30000000) >> 28) << 12;
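	/*
	 * Example: a part with 8 tile pipes and 8 banks encodes as
	 * (3 << 0) | (1 << 4) | (0 << 8) | (row_size << 12), with row_size
	 * taken from bits 29:28 of gb_addr_config.
	 */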
3453 
3454 	if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
3455 		u32 efuse_straps_4;
3456 		u32 efuse_straps_3;
3457 
3458 		efuse_straps_4 = RREG32_RCU(0x204);
3459 		efuse_straps_3 = RREG32_RCU(0x203);
3460 		tmp = (((efuse_straps_4 & 0xf) << 4) |
3461 		      ((efuse_straps_3 & 0xf0000000) >> 28));
3462 	} else {
3463 		tmp = 0;
3464 		for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
3465 			u32 rb_disable_bitmap;
3466 
3467 			WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
3468 			WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
3469 			rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
3470 			tmp <<= 4;
3471 			tmp |= rb_disable_bitmap;
3472 		}
3473 	}
3474 	/* the enabled RBs are just the ones not disabled :) */
3475 	disabled_rb_mask = tmp;
3476 	tmp = 0;
3477 	for (i = 0; i < rdev->config.evergreen.max_backends; i++)
3478 		tmp |= (1 << i);
3479 	/* if all the backends are disabled, fix it up here */
3480 	if ((disabled_rb_mask & tmp) == tmp) {
3481 		for (i = 0; i < rdev->config.evergreen.max_backends; i++)
3482 			disabled_rb_mask &= ~(1 << i);
3483 	}
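	/*
	 * disabled_rb_mask now holds one bit per render backend across all
	 * shader engines; the fixup above guarantees at least one backend
	 * stays enabled even if the harvest configuration disables them all.
	 */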
3484 
3485 	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
3486 		u32 simd_disable_bitmap;
3487 
3488 		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
3489 		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
3490 		simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
3491 		simd_disable_bitmap |= 0xffffffff << rdev->config.evergreen.max_simds;
3492 		tmp <<= 16;
3493 		tmp |= simd_disable_bitmap;
3494 	}
3495 	rdev->config.evergreen.active_simds = hweight32(~tmp);
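	/*
	 * Each SE contributed a 16-bit SIMD disable bitmap above, with bits
	 * beyond max_simds forced on, so hweight32(~tmp) counts exactly the
	 * SIMDs that exist and are enabled.
	 */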
3496 
3497 	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
3498 	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
3499 
3500 	WREG32(GB_ADDR_CONFIG, gb_addr_config);
3501 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
3502 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
3503 	WREG32(DMA_TILING_CONFIG, gb_addr_config);
3504 	WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
3505 	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
3506 	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
3507 
3508 	if ((rdev->config.evergreen.max_backends == 1) &&
3509 	    (rdev->flags & RADEON_IS_IGP)) {
3510 		if ((disabled_rb_mask & 3) == 1) {
3511 			/* RB0 disabled, RB1 enabled */
3512 			tmp = 0x11111111;
3513 		} else {
3514 			/* RB1 disabled, RB0 enabled */
3515 			tmp = 0x00000000;
3516 		}
3517 	} else {
3518 		tmp = gb_addr_config & NUM_PIPES_MASK;
3519 		tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
3520 						EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
3521 	}
3522 	rdev->config.evergreen.backend_map = tmp;
3523 	WREG32(GB_BACKEND_MAP, tmp);
3524 
3525 	WREG32(CGTS_SYS_TCC_DISABLE, 0);
3526 	WREG32(CGTS_TCC_DISABLE, 0);
3527 	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
3528 	WREG32(CGTS_USER_TCC_DISABLE, 0);
3529 
3530 	/* set HW defaults for 3D engine */
3531 	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
3532 				     ROQ_IB2_START(0x2b)));
3533 
3534 	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
3535 
3536 	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
3537 			     SYNC_GRADIENT |
3538 			     SYNC_WALKER |
3539 			     SYNC_ALIGNER));
3540 
3541 	sx_debug_1 = RREG32(SX_DEBUG_1);
3542 	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
3543 	WREG32(SX_DEBUG_1, sx_debug_1);
3544 
3545 
3546 	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
3547 	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
3548 	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
3549 	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
3550 
3551 	if (rdev->family <= CHIP_SUMO2)
3552 		WREG32(SMX_SAR_CTL0, 0x00010000);
3553 
3554 	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
3555 					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
3556 					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
3557 
3558 	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
3559 				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
3560 				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
3561 
3562 	WREG32(VGT_NUM_INSTANCES, 1);
3563 	WREG32(SPI_CONFIG_CNTL, 0);
3564 	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
3565 	WREG32(CP_PERFMON_CNTL, 0);
3566 
3567 	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
3568 				  FETCH_FIFO_HIWATER(0x4) |
3569 				  DONE_FIFO_HIWATER(0xe0) |
3570 				  ALU_UPDATE_FIFO_HIWATER(0x8)));
3571 
3572 	sq_config = RREG32(SQ_CONFIG);
3573 	sq_config &= ~(PS_PRIO(3) |
3574 		       VS_PRIO(3) |
3575 		       GS_PRIO(3) |
3576 		       ES_PRIO(3));
3577 	sq_config |= (VC_ENABLE |
3578 		      EXPORT_SRC_C |
3579 		      PS_PRIO(0) |
3580 		      VS_PRIO(1) |
3581 		      GS_PRIO(2) |
3582 		      ES_PRIO(3));
3583 
3584 	switch (rdev->family) {
3585 	case CHIP_CEDAR:
3586 	case CHIP_PALM:
3587 	case CHIP_SUMO:
3588 	case CHIP_SUMO2:
3589 	case CHIP_CAICOS:
3590 		/* no vertex cache */
3591 		sq_config &= ~VC_ENABLE;
3592 		break;
3593 	default:
3594 		break;
3595 	}
3596 
3597 	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
3598 
3599 	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
3600 	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
3601 	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
3602 	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
3603 	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
3604 	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
3605 	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
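	/*
	 * Eight GPRs (4 * 2) are held back for clause temporaries; the rest
	 * are split 12:6:4:4:3:3 (out of 32) between PS/VS/GS/ES/HS/LS,
	 * e.g. NUM_PS_GPRS((256 - 8) * 12 / 32) = 93 for max_gprs = 256.
	 */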
3606 
3607 	switch (rdev->family) {
3608 	case CHIP_CEDAR:
3609 	case CHIP_PALM:
3610 	case CHIP_SUMO:
3611 	case CHIP_SUMO2:
3612 		ps_thread_count = 96;
3613 		break;
3614 	default:
3615 		ps_thread_count = 128;
3616 		break;
3617 	}
3618 
3619 	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
3620 	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
3621 	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
3622 	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
3623 	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
3624 	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
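	/*
	 * The threads left after the PS allocation are divided by six and
	 * rounded down to a multiple of eight for each of VS/GS/ES/HS/LS,
	 * e.g. (248 - 128) / 6 = 20 -> 16 threads apiece.
	 */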
3625 
3626 	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
3627 	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
3628 	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
3629 	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
3630 	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
3631 	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
3632 
3633 	WREG32(SQ_CONFIG, sq_config);
3634 	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
3635 	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
3636 	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
3637 	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
3638 	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
3639 	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
3640 	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
3641 	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
3642 	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
3643 	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
3644 
3645 	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
3646 					  FORCE_EOV_MAX_REZ_CNT(255)));
3647 
3648 	switch (rdev->family) {
3649 	case CHIP_CEDAR:
3650 	case CHIP_PALM:
3651 	case CHIP_SUMO:
3652 	case CHIP_SUMO2:
3653 	case CHIP_CAICOS:
3654 		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
3655 		break;
3656 	default:
3657 		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
3658 		break;
3659 	}
3660 	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
3661 	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
3662 
3663 	WREG32(VGT_GS_VERTEX_REUSE, 16);
3664 	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
3665 	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
3666 
3667 	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
3668 	WREG32(VGT_OUT_DEALLOC_CNTL, 16);
3669 
3670 	WREG32(CB_PERF_CTR0_SEL_0, 0);
3671 	WREG32(CB_PERF_CTR0_SEL_1, 0);
3672 	WREG32(CB_PERF_CTR1_SEL_0, 0);
3673 	WREG32(CB_PERF_CTR1_SEL_1, 0);
3674 	WREG32(CB_PERF_CTR2_SEL_0, 0);
3675 	WREG32(CB_PERF_CTR2_SEL_1, 0);
3676 	WREG32(CB_PERF_CTR3_SEL_0, 0);
3677 	WREG32(CB_PERF_CTR3_SEL_1, 0);
3678 
3679 	/* clear render buffer base addresses */
3680 	WREG32(CB_COLOR0_BASE, 0);
3681 	WREG32(CB_COLOR1_BASE, 0);
3682 	WREG32(CB_COLOR2_BASE, 0);
3683 	WREG32(CB_COLOR3_BASE, 0);
3684 	WREG32(CB_COLOR4_BASE, 0);
3685 	WREG32(CB_COLOR5_BASE, 0);
3686 	WREG32(CB_COLOR6_BASE, 0);
3687 	WREG32(CB_COLOR7_BASE, 0);
3688 	WREG32(CB_COLOR8_BASE, 0);
3689 	WREG32(CB_COLOR9_BASE, 0);
3690 	WREG32(CB_COLOR10_BASE, 0);
3691 	WREG32(CB_COLOR11_BASE, 0);
3692 
3693 	/* set the shader const cache sizes to 0 */
3694 	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
3695 		WREG32(i, 0);
3696 	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
3697 		WREG32(i, 0);
3698 
3699 	tmp = RREG32(HDP_MISC_CNTL);
3700 	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
3701 	WREG32(HDP_MISC_CNTL, tmp);
3702 
3703 	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
3704 	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
3705 
3706 	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
3707 
3708 	udelay(50);
3709 
3710 }
3711 
3712 int evergreen_mc_init(struct radeon_device *rdev)
3713 {
3714 	u32 tmp;
3715 	int chansize, numchan;
3716 
3717 	/* Get VRAM information */
3718 	rdev->mc.vram_is_ddr = true;
3719 	if ((rdev->family == CHIP_PALM) ||
3720 	    (rdev->family == CHIP_SUMO) ||
3721 	    (rdev->family == CHIP_SUMO2))
3722 		tmp = RREG32(FUS_MC_ARB_RAMCFG);
3723 	else
3724 		tmp = RREG32(MC_ARB_RAMCFG);
3725 	if (tmp & CHANSIZE_OVERRIDE) {
3726 		chansize = 16;
3727 	} else if (tmp & CHANSIZE_MASK) {
3728 		chansize = 64;
3729 	} else {
3730 		chansize = 32;
3731 	}
3732 	tmp = RREG32(MC_SHARED_CHMAP);
3733 	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
3734 	case 0:
3735 	default:
3736 		numchan = 1;
3737 		break;
3738 	case 1:
3739 		numchan = 2;
3740 		break;
3741 	case 2:
3742 		numchan = 4;
3743 		break;
3744 	case 3:
3745 		numchan = 8;
3746 		break;
3747 	}
3748 	rdev->mc.vram_width = numchan * chansize;
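	/* e.g. four 64-bit channels yield a 256-bit memory bus */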
3749 	/* Could the aperture size report 0? */
3750 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
3751 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
3752 	/* Setup GPU memory space */
3753 	if ((rdev->family == CHIP_PALM) ||
3754 	    (rdev->family == CHIP_SUMO) ||
3755 	    (rdev->family == CHIP_SUMO2)) {
3756 		/* size in bytes on fusion */
3757 		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
3758 		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
3759 	} else {
3760 		/* size in MB on evergreen/cayman/tn */
3761 		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
3762 		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
3763 	}
3764 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
3765 	r700_vram_gtt_location(rdev, &rdev->mc);
3766 	radeon_update_bandwidth_info(rdev);
3767 
3768 	return 0;
3769 }
3770 
3771 void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
3772 {
3773 	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
3774 		RREG32(GRBM_STATUS));
3775 	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
3776 		RREG32(GRBM_STATUS_SE0));
3777 	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
3778 		RREG32(GRBM_STATUS_SE1));
3779 	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
3780 		RREG32(SRBM_STATUS));
3781 	dev_info(rdev->dev, "  SRBM_STATUS2              = 0x%08X\n",
3782 		RREG32(SRBM_STATUS2));
3783 	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
3784 		RREG32(CP_STALLED_STAT1));
3785 	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
3786 		RREG32(CP_STALLED_STAT2));
3787 	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
3788 		RREG32(CP_BUSY_STAT));
3789 	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
3790 		RREG32(CP_STAT));
3791 	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
3792 		RREG32(DMA_STATUS_REG));
3793 	if (rdev->family >= CHIP_CAYMAN) {
3794 		dev_info(rdev->dev, "  R_00D834_DMA_STATUS_REG   = 0x%08X\n",
3795 			 RREG32(DMA_STATUS_REG + 0x800));
3796 	}
3797 }
3798 
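/**
 * evergreen_is_display_hung - check whether the display controller is hung
 *
 * @rdev: radeon_device pointer
 *
 * Samples the HV counter of every enabled CRTC, then re-polls up to ten
 * times (100 us apart), dropping a CRTC from the hung mask as soon as its
 * counter advances.  Returns true if any enabled CRTC never advanced.
 */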
3799 bool evergreen_is_display_hung(struct radeon_device *rdev)
3800 {
3801 	u32 crtc_hung = 0;
3802 	u32 crtc_status[6];
3803 	u32 i, j, tmp;
3804 
3805 	for (i = 0; i < rdev->num_crtc; i++) {
3806 		if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
3807 			crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
3808 			crtc_hung |= (1 << i);
3809 		}
3810 	}
3811 
3812 	for (j = 0; j < 10; j++) {
3813 		for (i = 0; i < rdev->num_crtc; i++) {
3814 			if (crtc_hung & (1 << i)) {
3815 				tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
3816 				if (tmp != crtc_status[i])
3817 					crtc_hung &= ~(1 << i);
3818 			}
3819 		}
3820 		if (crtc_hung == 0)
3821 			return false;
3822 		udelay(100);
3823 	}
3824 
3825 	return true;
3826 }
3827 
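/**
 * evergreen_gpu_check_soft_reset - build a mask of hung engines
 *
 * @rdev: radeon_device pointer
 *
 * Translates the busy/pending bits of GRBM_STATUS, DMA_STATUS_REG,
 * SRBM_STATUS, SRBM_STATUS2 and VM_L2_STATUS into RADEON_RESET_* flags.
 * A zero return means nothing looks hung.
 */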
3828 u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
3829 {
3830 	u32 reset_mask = 0;
3831 	u32 tmp;
3832 
3833 	/* GRBM_STATUS */
3834 	tmp = RREG32(GRBM_STATUS);
3835 	if (tmp & (PA_BUSY | SC_BUSY |
3836 		   SH_BUSY | SX_BUSY |
3837 		   TA_BUSY | VGT_BUSY |
3838 		   DB_BUSY | CB_BUSY |
3839 		   SPI_BUSY | VGT_BUSY_NO_DMA))
3840 		reset_mask |= RADEON_RESET_GFX;
3841 
3842 	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
3843 		   CP_BUSY | CP_COHERENCY_BUSY))
3844 		reset_mask |= RADEON_RESET_CP;
3845 
3846 	if (tmp & GRBM_EE_BUSY)
3847 		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
3848 
3849 	/* DMA_STATUS_REG */
3850 	tmp = RREG32(DMA_STATUS_REG);
3851 	if (!(tmp & DMA_IDLE))
3852 		reset_mask |= RADEON_RESET_DMA;
3853 
3854 	/* SRBM_STATUS2 */
3855 	tmp = RREG32(SRBM_STATUS2);
3856 	if (tmp & DMA_BUSY)
3857 		reset_mask |= RADEON_RESET_DMA;
3858 
3859 	/* SRBM_STATUS */
3860 	tmp = RREG32(SRBM_STATUS);
3861 	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
3862 		reset_mask |= RADEON_RESET_RLC;
3863 
3864 	if (tmp & IH_BUSY)
3865 		reset_mask |= RADEON_RESET_IH;
3866 
3867 	if (tmp & SEM_BUSY)
3868 		reset_mask |= RADEON_RESET_SEM;
3869 
3870 	if (tmp & GRBM_RQ_PENDING)
3871 		reset_mask |= RADEON_RESET_GRBM;
3872 
3873 	if (tmp & VMC_BUSY)
3874 		reset_mask |= RADEON_RESET_VMC;
3875 
3876 	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
3877 		   MCC_BUSY | MCD_BUSY))
3878 		reset_mask |= RADEON_RESET_MC;
3879 
3880 	if (evergreen_is_display_hung(rdev))
3881 		reset_mask |= RADEON_RESET_DISPLAY;
3882 
3883 	/* VM_L2_STATUS */
3884 	tmp = RREG32(VM_L2_STATUS);
3885 	if (tmp & L2_BUSY)
3886 		reset_mask |= RADEON_RESET_VMC;
3887 
3888 	/* Skip the MC reset as it's most likely not hung, just busy */
3889 	if (reset_mask & RADEON_RESET_MC) {
3890 		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
3891 		reset_mask &= ~RADEON_RESET_MC;
3892 	}
3893 
3894 	return reset_mask;
3895 }
3896 
3897 static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
3898 {
3899 	struct evergreen_mc_save save;
3900 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
3901 	u32 tmp;
3902 
3903 	if (reset_mask == 0)
3904 		return;
3905 
3906 	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
3907 
3908 	evergreen_print_gpu_status_regs(rdev);
3909 
3910 	/* Disable CP parsing/prefetching */
3911 	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
3912 
3913 	if (reset_mask & RADEON_RESET_DMA) {
3914 		/* Disable DMA */
3915 		tmp = RREG32(DMA_RB_CNTL);
3916 		tmp &= ~DMA_RB_ENABLE;
3917 		WREG32(DMA_RB_CNTL, tmp);
3918 	}
3919 
3920 	udelay(50);
3921 
3922 	evergreen_mc_stop(rdev, &save);
3923 	if (evergreen_mc_wait_for_idle(rdev)) {
3924 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
3925 	}
3926 
3927 	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
3928 		grbm_soft_reset |= SOFT_RESET_DB |
3929 			SOFT_RESET_CB |
3930 			SOFT_RESET_PA |
3931 			SOFT_RESET_SC |
3932 			SOFT_RESET_SPI |
3933 			SOFT_RESET_SX |
3934 			SOFT_RESET_SH |
3935 			SOFT_RESET_TC |
3936 			SOFT_RESET_TA |
3937 			SOFT_RESET_VC |
3938 			SOFT_RESET_VGT;
3939 	}
3940 
3941 	if (reset_mask & RADEON_RESET_CP) {
3942 		grbm_soft_reset |= SOFT_RESET_CP |
3943 			SOFT_RESET_VGT;
3944 
3945 		srbm_soft_reset |= SOFT_RESET_GRBM;
3946 	}
3947 
3948 	if (reset_mask & RADEON_RESET_DMA)
3949 		srbm_soft_reset |= SOFT_RESET_DMA;
3950 
3951 	if (reset_mask & RADEON_RESET_DISPLAY)
3952 		srbm_soft_reset |= SOFT_RESET_DC;
3953 
3954 	if (reset_mask & RADEON_RESET_RLC)
3955 		srbm_soft_reset |= SOFT_RESET_RLC;
3956 
3957 	if (reset_mask & RADEON_RESET_SEM)
3958 		srbm_soft_reset |= SOFT_RESET_SEM;
3959 
3960 	if (reset_mask & RADEON_RESET_IH)
3961 		srbm_soft_reset |= SOFT_RESET_IH;
3962 
3963 	if (reset_mask & RADEON_RESET_GRBM)
3964 		srbm_soft_reset |= SOFT_RESET_GRBM;
3965 
3966 	if (reset_mask & RADEON_RESET_VMC)
3967 		srbm_soft_reset |= SOFT_RESET_VMC;
3968 
3969 	if (!(rdev->flags & RADEON_IS_IGP)) {
3970 		if (reset_mask & RADEON_RESET_MC)
3971 			srbm_soft_reset |= SOFT_RESET_MC;
3972 	}
3973 
3974 	if (grbm_soft_reset) {
3975 		tmp = RREG32(GRBM_SOFT_RESET);
3976 		tmp |= grbm_soft_reset;
3977 		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3978 		WREG32(GRBM_SOFT_RESET, tmp);
3979 		tmp = RREG32(GRBM_SOFT_RESET);
3980 
3981 		udelay(50);
3982 
3983 		tmp &= ~grbm_soft_reset;
3984 		WREG32(GRBM_SOFT_RESET, tmp);
3985 		tmp = RREG32(GRBM_SOFT_RESET);
3986 	}
3987 
3988 	if (srbm_soft_reset) {
3989 		tmp = RREG32(SRBM_SOFT_RESET);
3990 		tmp |= srbm_soft_reset;
3991 		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
3992 		WREG32(SRBM_SOFT_RESET, tmp);
3993 		tmp = RREG32(SRBM_SOFT_RESET);
3994 
3995 		udelay(50);
3996 
3997 		tmp &= ~srbm_soft_reset;
3998 		WREG32(SRBM_SOFT_RESET, tmp);
3999 		tmp = RREG32(SRBM_SOFT_RESET);
4000 	}
4001 
4002 	/* Wait a little for things to settle down */
4003 	udelay(50);
4004 
4005 	evergreen_mc_resume(rdev, &save);
4006 	udelay(50);
4007 
4008 	evergreen_print_gpu_status_regs(rdev);
4009 }
4010 
4011 void evergreen_gpu_pci_config_reset(struct radeon_device *rdev)
4012 {
4013 	struct evergreen_mc_save save;
4014 	u32 tmp, i;
4015 
4016 	dev_info(rdev->dev, "GPU pci config reset\n");
4017 
4018 	/* disable dpm? */
4019 
4020 	/* Disable CP parsing/prefetching */
4021 	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
4022 	udelay(50);
4023 	/* Disable DMA */
4024 	tmp = RREG32(DMA_RB_CNTL);
4025 	tmp &= ~DMA_RB_ENABLE;
4026 	WREG32(DMA_RB_CNTL, tmp);
4027 	/* XXX other engines? */
4028 
4029 	/* halt the rlc */
4030 	r600_rlc_stop(rdev);
4031 
4032 	udelay(50);
4033 
4034 	/* set mclk/sclk to bypass */
4035 	rv770_set_clk_bypass_mode(rdev);
4036 	/* disable BM */
4037 	pci_clear_master(rdev->pdev);
4038 	/* disable mem access */
4039 	evergreen_mc_stop(rdev, &save);
4040 	if (evergreen_mc_wait_for_idle(rdev)) {
4041 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
4042 	}
4043 	/* reset */
4044 	radeon_pci_config_reset(rdev);
4045 	/* wait for asic to come out of reset */
4046 	for (i = 0; i < rdev->usec_timeout; i++) {
4047 		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
4048 			break;
4049 		udelay(1);
4050 	}
4051 }
4052 
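/**
 * evergreen_asic_reset - try to reset the GPU
 *
 * @rdev: radeon_device pointer
 * @hard: go straight to a full PCI config reset
 *
 * Escalating reset: try a targeted soft reset of the engines flagged as
 * hung first, then fall back to a PCI config reset if something is still
 * hung and the radeon_hard_reset module parameter allows it.
 */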
4053 int evergreen_asic_reset(struct radeon_device *rdev, bool hard)
4054 {
4055 	u32 reset_mask;
4056 
4057 	if (hard) {
4058 		evergreen_gpu_pci_config_reset(rdev);
4059 		return 0;
4060 	}
4061 
4062 	reset_mask = evergreen_gpu_check_soft_reset(rdev);
4063 
4064 	if (reset_mask)
4065 		r600_set_bios_scratch_engine_hung(rdev, true);
4066 
4067 	/* try soft reset */
4068 	evergreen_gpu_soft_reset(rdev, reset_mask);
4069 
4070 	reset_mask = evergreen_gpu_check_soft_reset(rdev);
4071 
4072 	/* try pci config reset */
4073 	if (reset_mask && radeon_hard_reset)
4074 		evergreen_gpu_pci_config_reset(rdev);
4075 
4076 	reset_mask = evergreen_gpu_check_soft_reset(rdev);
4077 
4078 	if (!reset_mask)
4079 		r600_set_bios_scratch_engine_hung(rdev, false);
4080 
4081 	return 0;
4082 }
4083 
4084 /**
4085  * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
4086  *
4087  * @rdev: radeon_device pointer
4088  * @ring: radeon_ring structure holding ring information
4089  *
4090  * Check if the GFX engine is locked up.
4091  * Returns true if the engine appears to be locked up, false if not.
4092  */
4093 bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
4094 {
4095 	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
4096 
4097 	if (!(reset_mask & (RADEON_RESET_GFX |
4098 			    RADEON_RESET_COMPUTE |
4099 			    RADEON_RESET_CP))) {
4100 		radeon_ring_lockup_update(rdev, ring);
4101 		return false;
4102 	}
4103 	return radeon_ring_test_lockup(rdev, ring);
4104 }
4105 
4106 /*
4107  * RLC
4108  */
4109 #define RLC_SAVE_RESTORE_LIST_END_MARKER    0x00000000
4110 #define RLC_CLEAR_STATE_END_MARKER          0x00000001
4111 
4112 void sumo_rlc_fini(struct radeon_device *rdev)
4113 {
4114 	int r;
4115 
4116 	/* save restore block */
4117 	if (rdev->rlc.save_restore_obj) {
4118 		r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
4119 		if (unlikely(r != 0))
4120 			dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
4121 		radeon_bo_unpin(rdev->rlc.save_restore_obj);
4122 		radeon_bo_unreserve(rdev->rlc.save_restore_obj);
4123 
4124 		radeon_bo_unref(&rdev->rlc.save_restore_obj);
4125 		rdev->rlc.save_restore_obj = NULL;
4126 	}
4127 
4128 	/* clear state block */
4129 	if (rdev->rlc.clear_state_obj) {
4130 		r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
4131 		if (unlikely(r != 0))
4132 			dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
4133 		radeon_bo_unpin(rdev->rlc.clear_state_obj);
4134 		radeon_bo_unreserve(rdev->rlc.clear_state_obj);
4135 
4136 		radeon_bo_unref(&rdev->rlc.clear_state_obj);
4137 		rdev->rlc.clear_state_obj = NULL;
4138 	}
4139 
4140 	/* cp table block */
4141 	if (rdev->rlc.cp_table_obj) {
4142 		r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
4143 		if (unlikely(r != 0))
4144 			dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
4145 		radeon_bo_unpin(rdev->rlc.cp_table_obj);
4146 		radeon_bo_unreserve(rdev->rlc.cp_table_obj);
4147 
4148 		radeon_bo_unref(&rdev->rlc.cp_table_obj);
4149 		rdev->rlc.cp_table_obj = NULL;
4150 	}
4151 }
4152 
4153 #define CP_ME_TABLE_SIZE    96
4154 
4155 int sumo_rlc_init(struct radeon_device *rdev)
4156 {
4157 	const u32 *src_ptr;
4158 	volatile u32 *dst_ptr;
4159 	u32 dws, data, i, j, k, reg_num;
4160 	u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index = 0;
4161 	u64 reg_list_mc_addr;
4162 	const struct cs_section_def *cs_data;
4163 	int r;
4164 
4165 	src_ptr = rdev->rlc.reg_list;
4166 	dws = rdev->rlc.reg_list_size;
4167 	if (rdev->family >= CHIP_BONAIRE) {
4168 		dws += (5 * 16) + 48 + 48 + 64;
4169 	}
4170 	cs_data = rdev->rlc.cs_data;
4171 
4172 	if (src_ptr) {
4173 		/* save restore block */
4174 		if (rdev->rlc.save_restore_obj == NULL) {
4175 			r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
4176 					     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
4177 					     NULL, &rdev->rlc.save_restore_obj);
4178 			if (r) {
4179 				dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
4180 				return r;
4181 			}
4182 		}
4183 
4184 		r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
4185 		if (unlikely(r != 0)) {
4186 			sumo_rlc_fini(rdev);
4187 			return r;
4188 		}
4189 		r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
4190 				  &rdev->rlc.save_restore_gpu_addr);
4191 		if (r) {
4192 			radeon_bo_unreserve(rdev->rlc.save_restore_obj);
4193 			dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
4194 			sumo_rlc_fini(rdev);
4195 			return r;
4196 		}
4197 
4198 		r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
4199 		if (r) {
4200 			dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
4201 			sumo_rlc_fini(rdev);
4202 			return r;
4203 		}
4204 		/* write the sr buffer */
4205 		dst_ptr = rdev->rlc.sr_ptr;
4206 		if (rdev->family >= CHIP_TAHITI) {
4207 			/* SI */
4208 			for (i = 0; i < rdev->rlc.reg_list_size; i++)
4209 				dst_ptr[i] = cpu_to_le32(src_ptr[i]);
4210 		} else {
4211 			/* ON/LN/TN */
4212 			/* format:
4213 			 * dw0: (reg2 << 16) | reg1
4214 			 * dw1: reg1 save space
4215 			 * dw2: reg2 save space
4216 			 */
4217 			for (i = 0; i < dws; i++) {
4218 				data = src_ptr[i] >> 2;
4219 				i++;
4220 				if (i < dws)
4221 					data |= (src_ptr[i] >> 2) << 16;
4222 				j = (((i - 1) * 3) / 2);
4223 				dst_ptr[j] = cpu_to_le32(data);
4224 			}
4225 			j = ((i * 3) / 2);
4226 			dst_ptr[j] = cpu_to_le32(RLC_SAVE_RESTORE_LIST_END_MARKER);
4227 		}
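		/*
		 * With the packing above, register pair N starts at dword
		 * 3 * N: both offsets share dst[3N], and dst[3N + 1] and
		 * dst[3N + 2] are their save space.
		 */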
4228 		radeon_bo_kunmap(rdev->rlc.save_restore_obj);
4229 		radeon_bo_unreserve(rdev->rlc.save_restore_obj);
4230 	}
4231 
4232 	if (cs_data) {
4233 		/* clear state block */
4234 		if (rdev->family >= CHIP_BONAIRE) {
4235 			rdev->rlc.clear_state_size = dws = cik_get_csb_size(rdev);
4236 		} else if (rdev->family >= CHIP_TAHITI) {
4237 			rdev->rlc.clear_state_size = si_get_csb_size(rdev);
4238 			dws = rdev->rlc.clear_state_size + (256 / 4);
4239 		} else {
4240 			reg_list_num = 0;
4241 			dws = 0;
4242 			for (i = 0; cs_data[i].section != NULL; i++) {
4243 				for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
4244 					reg_list_num++;
4245 					dws += cs_data[i].section[j].reg_count;
4246 				}
4247 			}
4248 			reg_list_blk_index = (3 * reg_list_num + 2);
4249 			dws += reg_list_blk_index;
4250 			rdev->rlc.clear_state_size = dws;
4251 		}
4252 
4253 		if (rdev->rlc.clear_state_obj == NULL) {
4254 			r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
4255 					     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
4256 					     NULL, &rdev->rlc.clear_state_obj);
4257 			if (r) {
4258 				dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
4259 				sumo_rlc_fini(rdev);
4260 				return r;
4261 			}
4262 		}
4263 		r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
4264 		if (unlikely(r != 0)) {
4265 			sumo_rlc_fini(rdev);
4266 			return r;
4267 		}
4268 		r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
4269 				  &rdev->rlc.clear_state_gpu_addr);
4270 		if (r) {
4271 			radeon_bo_unreserve(rdev->rlc.clear_state_obj);
4272 			dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
4273 			sumo_rlc_fini(rdev);
4274 			return r;
4275 		}
4276 
4277 		r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
4278 		if (r) {
4279 			dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
4280 			sumo_rlc_fini(rdev);
4281 			return r;
4282 		}
4283 		/* set up the cs buffer */
4284 		dst_ptr = rdev->rlc.cs_ptr;
4285 		if (rdev->family >= CHIP_BONAIRE) {
4286 			cik_get_csb_buffer(rdev, dst_ptr);
4287 		} else if (rdev->family >= CHIP_TAHITI) {
4288 			reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
4289 			dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
4290 			dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
4291 			dst_ptr[2] = cpu_to_le32(rdev->rlc.clear_state_size);
4292 			si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
4293 		} else {
4294 			reg_list_hdr_blk_index = 0;
4295 			reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
4296 			data = upper_32_bits(reg_list_mc_addr);
4297 			dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
4298 			reg_list_hdr_blk_index++;
4299 			for (i = 0; cs_data[i].section != NULL; i++) {
4300 				for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
4301 					reg_num = cs_data[i].section[j].reg_count;
4302 					data = reg_list_mc_addr & 0xffffffff;
4303 					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
4304 					reg_list_hdr_blk_index++;
4305 
4306 					data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
4307 					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
4308 					reg_list_hdr_blk_index++;
4309 
4310 					data = 0x08000000 | (reg_num * 4);
4311 					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
4312 					reg_list_hdr_blk_index++;
4313 
4314 					for (k = 0; k < reg_num; k++) {
4315 						data = cs_data[i].section[j].extent[k];
4316 						dst_ptr[reg_list_blk_index + k] = cpu_to_le32(data);
4317 					}
4318 					reg_list_mc_addr += reg_num * 4;
4319 					reg_list_blk_index += reg_num;
4320 				}
4321 			}
4322 			dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(RLC_CLEAR_STATE_END_MARKER);
4323 		}
4324 		radeon_bo_kunmap(rdev->rlc.clear_state_obj);
4325 		radeon_bo_unreserve(rdev->rlc.clear_state_obj);
4326 	}
4327 
4328 	if (rdev->rlc.cp_table_size) {
4329 		if (rdev->rlc.cp_table_obj == NULL) {
4330 			r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
4331 					     PAGE_SIZE, true,
4332 					     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
4333 					     NULL, &rdev->rlc.cp_table_obj);
4334 			if (r) {
4335 				dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
4336 				sumo_rlc_fini(rdev);
4337 				return r;
4338 			}
4339 		}
4340 
4341 		r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
4342 		if (unlikely(r != 0)) {
4343 			dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
4344 			sumo_rlc_fini(rdev);
4345 			return r;
4346 		}
4347 		r = radeon_bo_pin(rdev->rlc.cp_table_obj, RADEON_GEM_DOMAIN_VRAM,
4348 				  &rdev->rlc.cp_table_gpu_addr);
4349 		if (r) {
4350 			radeon_bo_unreserve(rdev->rlc.cp_table_obj);
4351 			dev_warn(rdev->dev, "(%d) pin RLC cp_table bo failed\n", r);
4352 			sumo_rlc_fini(rdev);
4353 			return r;
4354 		}
4355 		r = radeon_bo_kmap(rdev->rlc.cp_table_obj, (void **)&rdev->rlc.cp_table_ptr);
4356 		if (r) {
4357 			dev_warn(rdev->dev, "(%d) map RLC cp table bo failed\n", r);
4358 			sumo_rlc_fini(rdev);
4359 			return r;
4360 		}
4361 
4362 		cik_init_cp_pg_table(rdev);
4363 
4364 		radeon_bo_kunmap(rdev->rlc.cp_table_obj);
4365 		radeon_bo_unreserve(rdev->rlc.cp_table_obj);
4366 
4367 	}
4368 
4369 	return 0;
4370 }
4371 
4372 static void evergreen_rlc_start(struct radeon_device *rdev)
4373 {
4374 	u32 mask = RLC_ENABLE;
4375 
4376 	if (rdev->flags & RADEON_IS_IGP) {
4377 		mask |= GFX_POWER_GATING_ENABLE | GFX_POWER_GATING_SRC;
4378 	}
4379 
4380 	WREG32(RLC_CNTL, mask);
4381 }
4382 
4383 int evergreen_rlc_resume(struct radeon_device *rdev)
4384 {
4385 	u32 i;
4386 	const __be32 *fw_data;
4387 
4388 	if (!rdev->rlc_fw)
4389 		return -EINVAL;
4390 
4391 	r600_rlc_stop(rdev);
4392 
4393 	WREG32(RLC_HB_CNTL, 0);
4394 
4395 	if (rdev->flags & RADEON_IS_IGP) {
4396 		if (rdev->family == CHIP_ARUBA) {
4397 			u32 always_on_bitmap =
4398 				3 | (3 << (16 * rdev->config.cayman.max_shader_engines));
4399 			/* find out the number of active simds */
4400 			u32 tmp = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
4401 			tmp |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
4402 			tmp = hweight32(~tmp);
4403 			if (tmp == rdev->config.cayman.max_simds_per_se) {
4404 				WREG32(TN_RLC_LB_ALWAYS_ACTIVE_SIMD_MASK, always_on_bitmap);
4405 				WREG32(TN_RLC_LB_PARAMS, 0x00601004);
4406 				WREG32(TN_RLC_LB_INIT_SIMD_MASK, 0xffffffff);
4407 				WREG32(TN_RLC_LB_CNTR_INIT, 0x00000000);
4408 				WREG32(TN_RLC_LB_CNTR_MAX, 0x00002000);
4409 			}
4410 		} else {
4411 			WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
4412 			WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
4413 		}
4414 		WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
4415 		WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
4416 	} else {
4417 		WREG32(RLC_HB_BASE, 0);
4418 		WREG32(RLC_HB_RPTR, 0);
4419 		WREG32(RLC_HB_WPTR, 0);
4420 		WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
4421 		WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
4422 	}
4423 	WREG32(RLC_MC_CNTL, 0);
4424 	WREG32(RLC_UCODE_CNTL, 0);
4425 
4426 	fw_data = (const __be32 *)rdev->rlc_fw->data;
4427 	if (rdev->family >= CHIP_ARUBA) {
4428 		for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
4429 			WREG32(RLC_UCODE_ADDR, i);
4430 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
4431 		}
4432 	} else if (rdev->family >= CHIP_CAYMAN) {
4433 		for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
4434 			WREG32(RLC_UCODE_ADDR, i);
4435 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
4436 		}
4437 	} else {
4438 		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
4439 			WREG32(RLC_UCODE_ADDR, i);
4440 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
4441 		}
4442 	}
4443 	WREG32(RLC_UCODE_ADDR, 0);
4444 
4445 	evergreen_rlc_start(rdev);
4446 
4447 	return 0;
4448 }
4449 
4450 /* Interrupts */
4451 
4452 u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
4453 {
4454 	if (crtc >= rdev->num_crtc)
4455 		return 0;
4456 	else
4457 		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
4458 }
4459 
4460 void evergreen_disable_interrupt_state(struct radeon_device *rdev)
4461 {
4462 	int i;
4463 	u32 tmp;
4464 
4465 	if (rdev->family >= CHIP_CAYMAN) {
4466 		cayman_cp_int_cntl_setup(rdev, 0,
4467 					 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4468 		cayman_cp_int_cntl_setup(rdev, 1, 0);
4469 		cayman_cp_int_cntl_setup(rdev, 2, 0);
4470 		tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
4471 		WREG32(CAYMAN_DMA1_CNTL, tmp);
4472 	} else
4473 		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
4474 	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
4475 	WREG32(DMA_CNTL, tmp);
4476 	WREG32(GRBM_INT_CNTL, 0);
4477 	WREG32(SRBM_INT_CNTL, 0);
4478 	for (i = 0; i < rdev->num_crtc; i++)
4479 		WREG32(INT_MASK + crtc_offsets[i], 0);
4480 	for (i = 0; i < rdev->num_crtc; i++)
4481 		WREG32(GRPH_INT_CONTROL + crtc_offsets[i], 0);
4482 
4483 	/* only one DAC on DCE5 */
4484 	if (!ASIC_IS_DCE5(rdev))
4485 		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
4486 	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
4487 
4488 	for (i = 0; i < 6; i++)
4489 		WREG32_AND(DC_HPDx_INT_CONTROL(i), DC_HPDx_INT_POLARITY);
4490 }
4491 
4492 /* Note that the order we write back regs here is important */
4493 int evergreen_irq_set(struct radeon_device *rdev)
4494 {
4495 	int i;
4496 	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
4497 	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
4498 	u32 grbm_int_cntl = 0;
4499 	u32 dma_cntl, dma_cntl1 = 0;
4500 	u32 thermal_int = 0;
4501 
4502 	if (!rdev->irq.installed) {
4503 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
4504 		return -EINVAL;
4505 	}
4506 	/* don't enable anything if the ih is disabled */
4507 	if (!rdev->ih.enabled) {
4508 		r600_disable_interrupts(rdev);
4509 		/* force the active interrupt state to all disabled */
4510 		evergreen_disable_interrupt_state(rdev);
4511 		return 0;
4512 	}
4513 
4514 	if (rdev->family == CHIP_ARUBA)
4515 		thermal_int = RREG32(TN_CG_THERMAL_INT_CTRL) &
4516 			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
4517 	else
4518 		thermal_int = RREG32(CG_THERMAL_INT) &
4519 			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
4520 
4521 	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
4522 
4523 	if (rdev->family >= CHIP_CAYMAN) {
4524 		/* enable CP interrupts on all rings */
4525 		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
4526 			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
4527 			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
4528 		}
4529 		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
4530 			DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
4531 			cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
4532 		}
4533 		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
4534 			DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
4535 			cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
4536 		}
4537 	} else {
4538 		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
4539 			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
4540 			cp_int_cntl |= RB_INT_ENABLE;
4541 			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
4542 		}
4543 	}
4544 
4545 	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
4546 		DRM_DEBUG("r600_irq_set: sw int dma\n");
4547 		dma_cntl |= TRAP_ENABLE;
4548 	}
4549 
4550 	if (rdev->family >= CHIP_CAYMAN) {
4551 		dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
4552 		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
4553 			DRM_DEBUG("r600_irq_set: sw int dma1\n");
4554 			dma_cntl1 |= TRAP_ENABLE;
4555 		}
4556 	}
4557 
4558 	if (rdev->irq.dpm_thermal) {
4559 		DRM_DEBUG("dpm thermal\n");
4560 		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
4561 	}
4562 
4563 	if (rdev->family >= CHIP_CAYMAN) {
4564 		cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
4565 		cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
4566 		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
4567 	} else
4568 		WREG32(CP_INT_CNTL, cp_int_cntl);
4569 
4570 	WREG32(DMA_CNTL, dma_cntl);
4571 
4572 	if (rdev->family >= CHIP_CAYMAN)
4573 		WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
4574 
4575 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
4576 
4577 	for (i = 0; i < rdev->num_crtc; i++) {
4578 		radeon_irq_kms_set_irq_n_enabled(
4579 		    rdev, INT_MASK + crtc_offsets[i],
4580 		    VBLANK_INT_MASK,
4581 		    rdev->irq.crtc_vblank_int[i] ||
4582 		    atomic_read(&rdev->irq.pflip[i]), "vblank", i);
4583 	}
4584 
4585 	for (i = 0; i < rdev->num_crtc; i++)
4586 		WREG32(GRPH_INT_CONTROL + crtc_offsets[i], GRPH_PFLIP_INT_MASK);
4587 
4588 	for (i = 0; i < 6; i++) {
4589 		radeon_irq_kms_set_irq_n_enabled(
4590 		    rdev, DC_HPDx_INT_CONTROL(i),
4591 		    DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN,
4592 		    rdev->irq.hpd[i], "HPD", i);
4593 	}
4594 
4595 	if (rdev->family == CHIP_ARUBA)
4596 		WREG32(TN_CG_THERMAL_INT_CTRL, thermal_int);
4597 	else
4598 		WREG32(CG_THERMAL_INT, thermal_int);
4599 
4600 	for (i = 0; i < 6; i++) {
4601 		radeon_irq_kms_set_irq_n_enabled(
4602 		    rdev, AFMT_AUDIO_PACKET_CONTROL + crtc_offsets[i],
4603 		    AFMT_AZ_FORMAT_WTRIG_MASK,
4604 		    rdev->irq.afmt[i], "HDMI", i);
4605 	}
4606 
4607 	/* posting read */
4608 	RREG32(SRBM_STATUS);
4609 
4610 	return 0;
4611 }
4612 
4613 /* Note that the order we write back regs here is important */
4614 static void evergreen_irq_ack(struct radeon_device *rdev)
4615 {
4616 	int i, j;
4617 	u32 *grph_int = rdev->irq.stat_regs.evergreen.grph_int;
4618 	u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
4619 	u32 *afmt_status = rdev->irq.stat_regs.evergreen.afmt_status;
4620 
4621 	for (i = 0; i < 6; i++) {
4622 		disp_int[i] = RREG32(evergreen_disp_int_status[i]);
4623 		afmt_status[i] = RREG32(AFMT_STATUS + crtc_offsets[i]);
4624 		if (i < rdev->num_crtc)
4625 			grph_int[i] = RREG32(GRPH_INT_STATUS + crtc_offsets[i]);
4626 	}
4627 
4628 	/* We write back the interrupt status registers two CRTCs at a time */
4629 	for (i = 0; i < rdev->num_crtc; i += 2) {
4630 		for (j = i; j < (i + 2); j++) {
4631 			if (grph_int[j] & GRPH_PFLIP_INT_OCCURRED)
4632 				WREG32(GRPH_INT_STATUS + crtc_offsets[j],
4633 				       GRPH_PFLIP_INT_CLEAR);
4634 		}
4635 
4636 		for (j = i; j < (i + 2); j++) {
4637 			if (disp_int[j] & LB_D1_VBLANK_INTERRUPT)
4638 				WREG32(VBLANK_STATUS + crtc_offsets[j],
4639 				       VBLANK_ACK);
4640 			if (disp_int[j] & LB_D1_VLINE_INTERRUPT)
4641 				WREG32(VLINE_STATUS + crtc_offsets[j],
4642 				       VLINE_ACK);
4643 		}
4644 	}
4645 
4646 	for (i = 0; i < 6; i++) {
4647 		if (disp_int[i] & DC_HPD1_INTERRUPT)
4648 			WREG32_OR(DC_HPDx_INT_CONTROL(i), DC_HPDx_INT_ACK);
4649 	}
4650 
4651 	for (i = 0; i < 6; i++) {
4652 		if (disp_int[i] & DC_HPD1_RX_INTERRUPT)
4653 			WREG32_OR(DC_HPDx_INT_CONTROL(i), DC_HPDx_RX_INT_ACK);
4654 	}
4655 
4656 	for (i = 0; i < 6; i++) {
4657 		if (afmt_status[i] & AFMT_AZ_FORMAT_WTRIG)
4658 			WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + crtc_offsets[i],
4659 				  AFMT_AZ_FORMAT_WTRIG_ACK);
4660 	}
4661 }
4662 
4663 static void evergreen_irq_disable(struct radeon_device *rdev)
4664 {
4665 	r600_disable_interrupts(rdev);
4666 	/* Wait and acknowledge irq */
4667 	mdelay(1);
4668 	evergreen_irq_ack(rdev);
4669 	evergreen_disable_interrupt_state(rdev);
4670 }
4671 
4672 void evergreen_irq_suspend(struct radeon_device *rdev)
4673 {
4674 	evergreen_irq_disable(rdev);
4675 	r600_rlc_stop(rdev);
4676 }
4677 
4678 static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
4679 {
4680 	u32 wptr, tmp;
4681 
4682 	if (rdev->wb.enabled)
4683 		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
4684 	else
4685 		wptr = RREG32(IH_RB_WPTR);
4686 
4687 	if (wptr & RB_OVERFLOW) {
4688 		wptr &= ~RB_OVERFLOW;
4689 		/* When a ring buffer overflow happens, start parsing interrupts
4690 		 * from the last vector that was not overwritten (wptr + 16).
4691 		 * Hopefully this allows us to catch up.
4692 		 */
4693 		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
4694 			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
4695 		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
4696 		tmp = RREG32(IH_RB_CNTL);
4697 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
4698 		WREG32(IH_RB_CNTL, tmp);
4699 	}
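	/* each IH vector is 16 bytes (four dwords), hence the + 16 above */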
4700 	return (wptr & rdev->ih.ptr_mask);
4701 }
4702 
4703 int evergreen_irq_process(struct radeon_device *rdev)
{
	u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
	u32 *afmt_status = rdev->irq.stat_regs.evergreen.afmt_status;
	u32 crtc_idx, hpd_idx, afmt_idx;
	u32 mask;
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_hdmi = false;
	bool queue_dp = false;
	bool queue_thermal = false;
	u32 status, addr;
	const char *event_name;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	wptr = evergreen_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	evergreen_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
		case 2: /* D2 vblank/vline */
		case 3: /* D3 vblank/vline */
		case 4: /* D4 vblank/vline */
		case 5: /* D5 vblank/vline */
		case 6: /* D6 vblank/vline */
			crtc_idx = src_id - 1;

			if (src_data == 0) { /* vblank */
				mask = LB_D1_VBLANK_INTERRUPT;
				event_name = "vblank";

				if (rdev->irq.crtc_vblank_int[crtc_idx]) {
					drm_handle_vblank(rdev->ddev, crtc_idx);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[crtc_idx])) {
					radeon_crtc_handle_vblank(rdev,
								  crtc_idx);
				}

			} else if (src_data == 1) { /* vline */
				mask = LB_D1_VLINE_INTERRUPT;
				event_name = "vline";
			} else {
				DRM_DEBUG("Unhandled interrupt: %d %d\n",
					  src_id, src_data);
				break;
			}

			if (!(disp_int[crtc_idx] & mask)) {
				DRM_DEBUG("IH: D%d %s - IH event w/o asserted irq bit?\n",
					  crtc_idx + 1, event_name);
			}

			disp_int[crtc_idx] &= ~mask;
			DRM_DEBUG("IH: D%d %s\n", crtc_idx + 1, event_name);

			break;
		case 8: /* D1 page flip */
		case 10: /* D2 page flip */
		case 12: /* D3 page flip */
		case 14: /* D4 page flip */
		case 16: /* D5 page flip */
		case 18: /* D6 page flip */
			DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
			if (radeon_use_pflipirq > 0)
				radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
			break;
		case 42: /* HPD hotplug */
			if (src_data <= 5) {
				hpd_idx = src_data;
				mask = DC_HPD1_INTERRUPT;
				queue_hotplug = true;
				event_name = "HPD";

			} else if (src_data <= 11) {
				hpd_idx = src_data - 6;
				mask = DC_HPD1_RX_INTERRUPT;
				queue_dp = true;
				event_name = "HPD_RX";

			} else {
				DRM_DEBUG("Unhandled interrupt: %d %d\n",
					  src_id, src_data);
				break;
			}

			if (!(disp_int[hpd_idx] & mask))
				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

			disp_int[hpd_idx] &= ~mask;
			DRM_DEBUG("IH: %s%d\n", event_name, hpd_idx + 1);

			break;
		case 44: /* hdmi */
			afmt_idx = src_data;
			if (afmt_idx > 5) {
				DRM_ERROR("Unhandled interrupt: %d %d\n",
					  src_id, src_data);
				break;
			}

			if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

			afmt_status[afmt_idx] &= ~AFMT_AZ_FORMAT_WTRIG;
			queue_hdmi = true;
			DRM_DEBUG("IH: HDMI%d\n", afmt_idx + 1);
			break;
		case 96:
			DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
			WREG32(SRBM_INT_ACK, 0x1);
			break;
		case 124: /* UVD */
			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
			break;
		case 146:
		case 147:
			addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
			status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
			/* reset addr and status */
			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
			if (addr == 0x0 && status == 0x0)
				break;
			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
				addr);
			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
				status);
			cayman_vm_decode_fault(rdev, status, addr);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			if (rdev->family >= CHIP_CAYMAN) {
				switch (src_data) {
				case 0:
					radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
					break;
				case 1:
					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
					break;
				case 2:
					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
					break;
				}
			} else
				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 230: /* thermal low to high */
			DRM_DEBUG("IH: thermal low to high\n");
			rdev->pm.dpm.thermal.high_to_low = false;
			queue_thermal = true;
			break;
		case 231: /* thermal high to low */
			DRM_DEBUG("IH: thermal high to low\n");
			rdev->pm.dpm.thermal.high_to_low = true;
			queue_thermal = true;
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		case 244: /* DMA trap event */
			if (rdev->family >= CHIP_CAYMAN) {
				DRM_DEBUG("IH: DMA1 trap\n");
				radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
			}
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
		WREG32(IH_RB_RPTR, rptr);
	}
	if (queue_dp)
		schedule_work(&rdev->dp_work);
	if (queue_hotplug)
		schedule_delayed_work(&rdev->hotplug_work, 0);
	if (queue_hdmi)
		schedule_work(&rdev->audio_work);
	if (queue_thermal && rdev->pm.dpm_enabled)
		schedule_work(&rdev->pm.dpm.thermal.work);
	rdev->ih.rptr = rptr;
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = evergreen_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}

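/**
 * evergreen_uvd_init - early UVD setup
 * @rdev: radeon_device pointer
 *
 * Initializes the UVD block and its ring. If radeon_uvd_init()
 * fails, UVD is simply disabled for this device instead of failing
 * the whole init, so the rest of the driver keeps working.
 */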
static void evergreen_uvd_init(struct radeon_device *rdev)
{
	int r;

	if (!rdev->has_uvd)
		return;

	r = radeon_uvd_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
		/*
		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
		 * uvd_v2_2_resume() fail early, so nothing happens there.
		 * It is therefore pointless to go through that code path,
		 * hence why we disable UVD here.
		 */
		rdev->has_uvd = false;
		return;
	}
	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
}

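/**
 * evergreen_uvd_start - resume UVD and start its fence ring
 * @rdev: radeon_device pointer
 *
 * On any error the UVD ring size is reset to 0 so that
 * evergreen_uvd_resume() will skip bringing the ring up.
 */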
static void evergreen_uvd_start(struct radeon_device *rdev)
{
	int r;

	if (!rdev->has_uvd)
		return;

	r = uvd_v2_2_resume(rdev);
	if (r) {
		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
		goto error;
	}
	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
		goto error;
	}
	return;

error:
	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
}

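/**
 * evergreen_uvd_resume - bring up the UVD ring
 * @rdev: radeon_device pointer
 *
 * Initializes and starts the UVD ring; a no-op when UVD is disabled
 * or its ring was not set up by evergreen_uvd_start().
 */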
static void evergreen_uvd_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
		return;

	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
	if (r) {
		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
		return;
	}
	r = uvd_v1_0_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
		return;
	}
}

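/**
 * evergreen_startup - program the asic to a functional state
 * @rdev: radeon_device pointer
 *
 * Common bring-up path shared by init and resume: programs the MC
 * and GART, sets up the RLC, writeback and fence infrastructure,
 * installs interrupts, loads the CP microcode and starts the GFX,
 * DMA and UVD rings, the IB pool and audio.
 */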
static int evergreen_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);
	/* enable aspm */
	evergreen_program_aspm(rdev);

	/* scratch needs to be initialized before MC */
	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);

	if (ASIC_IS_DCE5(rdev) && !rdev->pm.dpm_enabled) {
		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	if (rdev->flags & RADEON_IS_AGP) {
		evergreen_agp_enable(rdev);
	} else {
		r = evergreen_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	evergreen_gpu_init(rdev);

	/* allocate rlc buffers */
	if (rdev->flags & RADEON_IS_IGP) {
		rdev->rlc.reg_list = sumo_rlc_save_restore_register_list;
		rdev->rlc.reg_list_size =
			(u32)ARRAY_SIZE(sumo_rlc_save_restore_register_list);
		rdev->rlc.cs_data = evergreen_cs_data;
		r = sumo_rlc_init(rdev);
		if (r) {
			DRM_ERROR("Failed to init rlc BOs!\n");
			return r;
		}
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	evergreen_uvd_start(rdev);

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0));
	if (r)
		return r;

	r = evergreen_cp_load_microcode(rdev);
	if (r)
		return r;
	r = evergreen_cp_resume(rdev);
	if (r)
		return r;
	r = r600_dma_resume(rdev);
	if (r)
		return r;

	evergreen_uvd_resume(rdev);

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}

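/**
 * evergreen_resume - bring the asic back up after suspend
 * @rdev: radeon_device pointer
 *
 * Resets the asic, re-posts the card via the ATOM BIOS, restores
 * the golden registers and re-runs the common startup path.
 */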
int evergreen_resume(struct radeon_device *rdev)
{
	int r;

	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed!\n");
	/* Do not reset the GPU before posting; on rv770 hw, unlike on
	 * r500 hw, posting will perform the tasks needed to bring the
	 * GPU back into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	/* init golden registers */
	evergreen_init_golden_registers(rdev);

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_resume(rdev);

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		DRM_ERROR("evergreen startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}

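/**
 * evergreen_suspend - quiesce the asic before suspend
 * @rdev: radeon_device pointer
 *
 * Stops power management, audio, UVD, the CP and DMA engines and
 * interrupts, then disables writeback and the GART; roughly the
 * inverse of evergreen_resume().
 */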
int evergreen_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	radeon_audio_fini(rdev);
	if (rdev->has_uvd) {
		radeon_uvd_suspend(rdev);
		uvd_v1_0_fini(rdev);
	}
	r700_cp_stop(rdev);
	r600_dma_stop(rdev);
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	evergreen_pcie_gart_disable(rdev);

	return 0;
}

/* The plan is to move initialization into this function and to use
 * helper functions so that radeon_device_init does little more than
 * call asic-specific functions. This should also allow us to remove
 * a bunch of callbacks like vram_info.
 */
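/**
 * evergreen_init - asic specific driver and hw init
 * @rdev: radeon_device pointer
 *
 * Reads and validates the (ATOM) BIOS, posts the card if needed,
 * initializes clocks, fences, AGP, the memory controller and the
 * rings, loads microcode and finally runs evergreen_startup().
 */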
int evergreen_init(struct radeon_device *rdev)
{
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed!\n");
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* init golden registers */
	evergreen_init_golden_registers(rdev);
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	radeon_fence_driver_init(rdev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = r600_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	}

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);

	evergreen_uvd_init(rdev);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r700_cp_fini(rdev);
		r600_dma_fini(rdev);
		r600_irq_fini(rdev);
		if (rdev->flags & RADEON_IS_IGP)
			sumo_rlc_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		evergreen_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing on BTC parts.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
			DRM_ERROR("radeon: MC ucode required for NI+.\n");
			return -EINVAL;
		}
	}

	return 0;
}

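/**
 * evergreen_fini - tear down the driver state
 * @rdev: radeon_device pointer
 *
 * Undoes evergreen_init(): shuts down the rings and interrupt
 * handling and releases the GART, memory manager, fence driver,
 * AGP and the cached BIOS copy.
 */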
void evergreen_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	radeon_audio_fini(rdev);
	r700_cp_fini(rdev);
	r600_dma_fini(rdev);
	r600_irq_fini(rdev);
	if (rdev->flags & RADEON_IS_IGP)
		sumo_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	uvd_v1_0_fini(rdev);
	radeon_uvd_fini(rdev);
	evergreen_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

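/**
 * evergreen_pcie_gen2_enable - switch the PCIE link to gen 2 speeds
 * @rdev: radeon_device pointer
 *
 * Only acts on discrete PCIE boards (not IGP or X2) whose upstream
 * bus supports at least 5.0 GT/s, and only when the radeon.pcie_gen2
 * module parameter allows it; a no-op if the link already runs at
 * the higher data rate.
 */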
void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, speed_cntl;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
		(rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}

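/**
 * evergreen_program_aspm - program PCIE link power management
 * @rdev: radeon_device pointer
 *
 * Configures PIF pairing, the L0s/L1 inactivity timers and, unless
 * PLL power-down in L1 is disabled, the PLL power states and
 * ramp-up times. Honours the radeon.aspm module parameter.
 */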
void evergreen_program_aspm(struct radeon_device *rdev)
{
	u32 data, orig;
	u32 pcie_lc_cntl, pcie_lc_cntl_old;
	bool disable_l0s, disable_l1 = false, disable_plloff_in_l1 = false;
	/* fusion_platform should be true if the system is a fusion
	 * system (an APU, or a dGPU in a fusion system).
	 * TODO: check whether the system is a fusion platform.
	 */
	bool fusion_platform = false;

	if (radeon_aspm == 0)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	switch (rdev->family) {
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_REDWOOD:
	case CHIP_CEDAR:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_PALM:
	case CHIP_ARUBA:
		disable_l0s = true;
		break;
	default:
		disable_l0s = false;
		break;
	}

	if (rdev->flags & RADEON_IS_IGP)
		fusion_platform = true; /* XXX also dGPUs in a fusion system */

	data = orig = RREG32_PIF_PHY0(PB0_PIF_PAIRING);
	if (fusion_platform)
		data &= ~MULTI_PIF;
	else
		data |= MULTI_PIF;
	if (data != orig)
		WREG32_PIF_PHY0(PB0_PIF_PAIRING, data);

	data = orig = RREG32_PIF_PHY1(PB1_PIF_PAIRING);
	if (fusion_platform)
		data &= ~MULTI_PIF;
	else
		data |= MULTI_PIF;
	if (data != orig)
		WREG32_PIF_PHY1(PB1_PIF_PAIRING, data);

	pcie_lc_cntl = pcie_lc_cntl_old = RREG32_PCIE_PORT(PCIE_LC_CNTL);
	pcie_lc_cntl &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
	if (!disable_l0s) {
		if (rdev->family >= CHIP_BARTS)
			pcie_lc_cntl |= LC_L0S_INACTIVITY(7);
		else
			pcie_lc_cntl |= LC_L0S_INACTIVITY(3);
	}

	if (!disable_l1) {
		if (rdev->family >= CHIP_BARTS)
			pcie_lc_cntl |= LC_L1_INACTIVITY(7);
		else
			pcie_lc_cntl |= LC_L1_INACTIVITY(8);

		if (!disable_plloff_in_l1) {
			data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
			if (data != orig)
				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);

			data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
			if (data != orig)
				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);

			data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
			if (data != orig)
				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);

			data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
			if (data != orig)
				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);

			if (rdev->family >= CHIP_BARTS) {
				data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
				data &= ~PLL_RAMP_UP_TIME_0_MASK;
				data |= PLL_RAMP_UP_TIME_0(4);
				if (data != orig)
					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);

				data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
				data &= ~PLL_RAMP_UP_TIME_1_MASK;
				data |= PLL_RAMP_UP_TIME_1(4);
				if (data != orig)
					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);

				data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
				data &= ~PLL_RAMP_UP_TIME_0_MASK;
				data |= PLL_RAMP_UP_TIME_0(4);
				if (data != orig)
					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);

				data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
				data &= ~PLL_RAMP_UP_TIME_1_MASK;
				data |= PLL_RAMP_UP_TIME_1(4);
				if (data != orig)
					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
			}

			data = orig = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
			data |= LC_DYN_LANES_PWR_STATE(3);
			if (data != orig)
				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);

			if (rdev->family >= CHIP_BARTS) {
				data = orig = RREG32_PIF_PHY0(PB0_PIF_CNTL);
				data &= ~LS2_EXIT_TIME_MASK;
				data |= LS2_EXIT_TIME(1);
				if (data != orig)
					WREG32_PIF_PHY0(PB0_PIF_CNTL, data);

				data = orig = RREG32_PIF_PHY1(PB1_PIF_CNTL);
				data &= ~LS2_EXIT_TIME_MASK;
				data |= LS2_EXIT_TIME(1);
				if (data != orig)
					WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
			}
		}
	}

	/* evergreen parts only */
	if (rdev->family < CHIP_BARTS)
		pcie_lc_cntl |= LC_PMI_TO_L1_DIS;

	if (pcie_lc_cntl != pcie_lc_cntl_old)
		WREG32_PCIE_PORT(PCIE_LC_CNTL, pcie_lc_cntl);
}