/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <drm/radeon_drm.h>

#include "atom.h"
#include "cayman_blit_shaders.h"
#include "clearstate_cayman.h"
#include "ni_reg.h"
#include "nid.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include "radeon_ucode.h"

/*
 * Indirect register accessors
 */
u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
	WREG32(TN_SMC_IND_INDEX_0, (reg));
	r = RREG32(TN_SMC_IND_DATA_0);
	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
	return r;
}

void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
	WREG32(TN_SMC_IND_INDEX_0, (reg));
	WREG32(TN_SMC_IND_DATA_0, (v));
	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
}
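
/*
 * TN_SMC_IND_INDEX_0/TN_SMC_IND_DATA_0 form a classic index/data pair: the
 * SMC register offset goes to the index port and the payload moves through
 * the data port, with smc_idx_lock serializing the two-step sequence.  A
 * minimal read-modify-write sketch on top of these helpers ("mask" and
 * "value" are illustrative names, not driver symbols):
 *
 *	u32 tmp = tn_smc_rreg(rdev, reg);
 *	tmp = (tmp & ~mask) | (value & mask);
 *	tn_smc_wreg(rdev, reg, tmp);
 */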

static const u32 tn_rlc_save_restore_register_list[] =
{
	0x98fc,
	0x98f0,
	0x9834,
	0x9838,
	0x9870,
	0x9874,
	0x8a14,
	0x8b24,
	0x8bcc,
	0x8b10,
	0x8c30,
	0x8d00,
	0x8d04,
	0x8c00,
	0x8c04,
	0x8c10,
	0x8c14,
	0x8d8c,
	0x8cf0,
	0x8e38,
	0x9508,
	0x9688,
	0x9608,
	0x960c,
	0x9610,
	0x9614,
	0x88c4,
	0x8978,
	0x88d4,
	0x900c,
	0x9100,
	0x913c,
	0x90e8,
	0x9354,
	0xa008,
	0x98f8,
	0x9148,
	0x914c,
	0x3f94,
	0x98f4,
	0x9b7c,
	0x3f8c,
	0x8950,
	0x8954,
	0x8a18,
	0x8b28,
	0x9144,
	0x3f90,
	0x915c,
	0x9160,
	0x9178,
	0x917c,
	0x9180,
	0x918c,
	0x9190,
	0x9194,
	0x9198,
	0x919c,
	0x91a8,
	0x91ac,
	0x91b0,
	0x91b4,
	0x91b8,
	0x91c4,
	0x91c8,
	0x91cc,
	0x91d0,
	0x91d4,
	0x91e0,
	0x91e4,
	0x91ec,
	0x91f0,
	0x91f4,
	0x9200,
	0x9204,
	0x929c,
	0x8030,
	0x9150,
	0x9a60,
	0x920c,
	0x9210,
	0x9228,
	0x922c,
	0x9244,
	0x9248,
	0x91e8,
	0x9294,
	0x9208,
	0x9224,
	0x9240,
	0x9220,
	0x923c,
	0x9258,
	0x9744,
	0xa200,
	0xa204,
	0xa208,
	0xa20c,
	0x8d58,
	0x9030,
	0x9034,
	0x9038,
	0x903c,
	0x9040,
	0x9654,
	0x897c,
	0xa210,
	0xa214,
	0x9868,
	0xa02c,
	0x9664,
	0x9698,
	0x949c,
	0x8e10,
	0x8e18,
	0x8c50,
	0x8c58,
	0x8c60,
	0x8c68,
	0x89b4,
	0x9830,
	0x802c,
};
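
/*
 * Offsets of the GFX registers the RLC saves and restores around power
 * state transitions on TN/ARUBA; the list is presumably consumed by the
 * common RLC setup path (sumo_rlc_init() is declared below for that
 * purpose).
 */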

extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void evergreen_program_aspm(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
extern void evergreen_gpu_pci_config_reset(struct radeon_device *rdev);

/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BARTS_smc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/TURKS_smc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_smc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_smc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");

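/*
 * Each golden register table below is a flat list of
 * {offset, and_mask, or_value} triplets handed to
 * radeon_program_register_sequence().  A sketch of the intended semantics,
 * assuming the common helper's read-modify-write behavior (an and_mask of
 * 0xffffffff means the value is written verbatim):
 *
 *	for (i = 0; i < array_size; i += 3) {
 *		reg      = registers[i + 0];
 *		and_mask = registers[i + 1];
 *		or_mask  = registers[i + 2];
 *		if (and_mask == 0xffffffff)
 *			tmp = or_mask;
 *		else
 *			tmp = (RREG32(reg) & ~and_mask) | or_mask;
 *		WREG32(reg, tmp);
 *	}
 */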
static const u32 cayman_golden_registers2[] =
{
	0x3e5c, 0xffffffff, 0x00000000,
	0x3e48, 0xffffffff, 0x00000000,
	0x3e4c, 0xffffffff, 0x00000000,
	0x3e64, 0xffffffff, 0x00000000,
	0x3e50, 0xffffffff, 0x00000000,
	0x3e60, 0xffffffff, 0x00000000
};

static const u32 cayman_golden_registers[] =
{
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0x8f311ff1, 0x001000f0,
	0x3f90, 0xffff0000, 0xff000000,
	0x9148, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0xc78, 0x00000080, 0x00000080,
	0xbd4, 0x70073777, 0x00011003,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x02011003,
	0x5bc0, 0x00200000, 0x50100000,
	0x98f8, 0x33773777, 0x02011003,
	0x98fc, 0xffffffff, 0x76541032,
	0x7030, 0x31000311, 0x00000011,
	0x2f48, 0x33773777, 0x42010001,
	0x6b28, 0x00000010, 0x00000012,
	0x7728, 0x00000010, 0x00000012,
	0x10328, 0x00000010, 0x00000012,
	0x10f28, 0x00000010, 0x00000012,
	0x11b28, 0x00000010, 0x00000012,
	0x12728, 0x00000010, 0x00000012,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x10c, 0x00000001, 0x00010003,
	0xa02c, 0xffffffff, 0x0000009b,
	0x913c, 0x0000010f, 0x01000100,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0x3700001f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88d0, 0xffffffff, 0x0f40df40,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};

static const u32 dvst_golden_registers2[] =
{
	0x8f8, 0xffffffff, 0,
	0x8fc, 0x00380000, 0,
	0x8f8, 0xffffffff, 1,
	0x8fc, 0x0e000000, 0
};

static const u32 dvst_golden_registers[] =
{
	0x690, 0x3fff3fff, 0x20c00033,
	0x918c, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x9150, 0xffffdfff, 0x6e944040,
	0x917c, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x915c, 0x0fff0fff, 0x00010000,
	0x3f90, 0xffff0001, 0xff000000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9148, 0xffff0001, 0xff000000,
	0x9190, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0x00000fff, 0x00000001,
	0x55e4, 0xff607fff, 0xfc000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffe, 0x00000000,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x12010001,
	0x5bb0, 0x000000f0, 0x00000070,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x00030000, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0xa008, 0xffffffff, 0x00010000,
	0x913c, 0xffff03ff, 0x01000100,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0xf700071f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x20ef8, 0x01ff01ff, 0x00000002,
	0x20e98, 0xfffffbff, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x8978, 0x3fffffff, 0x04050140,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};

static const u32 scrapper_golden_registers[] =
{
	0x690, 0x3fff3fff, 0x20c00033,
	0x918c, 0x0fff0fff, 0x00010006,
	0x918c, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x9150, 0xffffdfff, 0x6e944040,
	0x9150, 0xffffdfff, 0x6e944040,
	0x917c, 0x0fff0fff, 0x00030002,
	0x917c, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x915c, 0x0fff0fff, 0x00010000,
	0x915c, 0x0fff0fff, 0x00010000,
	0x3f90, 0xffff0001, 0xff000000,
	0x3f90, 0xffff0001, 0xff000000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9148, 0xffff0001, 0xff000000,
	0x9148, 0xffff0001, 0xff000000,
	0x9190, 0x0fff0fff, 0x00090008,
	0x9190, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0x00000fff, 0x00000001,
	0x929c, 0x00000fff, 0x00000001,
	0x55e4, 0xff607fff, 0xfc000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffe, 0x00000000,
	0x9838, 0xfffffffe, 0x00000000,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x12010001,
	0xd0b8, 0x73773777, 0x12010001,
	0x5bb0, 0x000000f0, 0x00000070,
	0x98f8, 0x73773777, 0x12010001,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x00030000, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x4d8, 0x00000fff, 0x00000100,
	0xa008, 0xffffffff, 0x00010000,
	0xa008, 0xffffffff, 0x00010000,
	0x913c, 0xffff03ff, 0x01000100,
	0x913c, 0xffff03ff, 0x01000100,
	0x90e8, 0x001fffff, 0x010400c0,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c30, 0x0000000f, 0x00040005,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x900c, 0x00ffffff, 0x0017071f,
	0x28350, 0x00000f01, 0x00000000,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0xf700071f, 0x00000002,
	0x9508, 0xf700071f, 0x00000002,
	0x9688, 0x00300000, 0x0017000f,
	0x960c, 0xffffffff, 0x54763210,
	0x960c, 0xffffffff, 0x54763210,
	0x20ef8, 0x01ff01ff, 0x00000002,
	0x20e98, 0xfffffbff, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x8978, 0x3fffffff, 0x04050140,
	0x8978, 0x3fffffff, 0x04050140,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000,
	0x8974, 0xffffffff, 0x00000000
};

static void ni_init_golden_registers(struct radeon_device *rdev)
{
	switch (rdev->family) {
	case CHIP_CAYMAN:
		radeon_program_register_sequence(rdev,
						 cayman_golden_registers,
						 (const u32)ARRAY_SIZE(cayman_golden_registers));
		radeon_program_register_sequence(rdev,
						 cayman_golden_registers2,
						 (const u32)ARRAY_SIZE(cayman_golden_registers2));
		break;
	case CHIP_ARUBA:
		if ((rdev->pdev->device == 0x9900) ||
		    (rdev->pdev->device == 0x9901) ||
		    (rdev->pdev->device == 0x9903) ||
		    (rdev->pdev->device == 0x9904) ||
		    (rdev->pdev->device == 0x9905) ||
		    (rdev->pdev->device == 0x9906) ||
		    (rdev->pdev->device == 0x9907) ||
		    (rdev->pdev->device == 0x9908) ||
		    (rdev->pdev->device == 0x9909) ||
		    (rdev->pdev->device == 0x990A) ||
		    (rdev->pdev->device == 0x990B) ||
		    (rdev->pdev->device == 0x990C) ||
		    (rdev->pdev->device == 0x990D) ||
		    (rdev->pdev->device == 0x990E) ||
		    (rdev->pdev->device == 0x990F) ||
		    (rdev->pdev->device == 0x9910) ||
		    (rdev->pdev->device == 0x9913) ||
		    (rdev->pdev->device == 0x9917) ||
		    (rdev->pdev->device == 0x9918)) {
			radeon_program_register_sequence(rdev,
							 dvst_golden_registers,
							 (const u32)ARRAY_SIZE(dvst_golden_registers));
			radeon_program_register_sequence(rdev,
							 dvst_golden_registers2,
							 (const u32)ARRAY_SIZE(dvst_golden_registers2));
		} else {
			radeon_program_register_sequence(rdev,
							 scrapper_golden_registers,
							 (const u32)ARRAY_SIZE(scrapper_golden_registers));
			radeon_program_register_sequence(rdev,
							 dvst_golden_registers2,
							 (const u32)ARRAY_SIZE(dvst_golden_registers2));
		}
		break;
	default:
		break;
	}
}

#define BTC_IO_MC_REGS_SIZE 29

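/*
 * Each MC table below holds BTC_IO_MC_REGS_SIZE {debug index, data} pairs;
 * ni_mc_load_microcode() below writes them out through
 * MC_SEQ_IO_DEBUG_INDEX/MC_SEQ_IO_DEBUG_DATA before streaming in the MC
 * ucode itself.
 */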
static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};

static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};

static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};

static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};

int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

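	/*
	 * The block below only runs when the MC engine is idle
	 * (running == 0), so the blackout save/restore branches inside it
	 * can never execute as written.
	 */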
	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
				break;
			udelay(1);
		}

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}

int ni_init_microcode(struct radeon_device *rdev)
{
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
	size_t smc_req_size = 0;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (rdev->family) {
	case CHIP_BARTS:
		chip_name = "BARTS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(BARTS_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_TURKS:
		chip_name = "TURKS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(TURKS_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_CAICOS:
		chip_name = "CAICOS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(CAICOS_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_CAYMAN:
		chip_name = "CAYMAN";
		rlc_chip_name = "CAYMAN";
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(CAYMAN_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_ARUBA:
		chip_name = "ARUBA";
		rlc_chip_name = "ARUBA";
		/* pfp/me same size as CAYMAN */
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
		mc_req_size = 0;
		break;
	default: BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		pr_err("ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		pr_err("ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		pr_err("ni_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

	/* no MC ucode on TN */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
		err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
		if (err)
			goto out;
		if (rdev->mc_fw->size != mc_req_size) {
			pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->mc_fw->size, fw_name);
			err = -EINVAL;
		}
	}

	if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
		if (err) {
			pr_err("smc: error loading firmware \"%s\"\n", fw_name);
			release_firmware(rdev->smc_fw);
			rdev->smc_fw = NULL;
			err = 0;
		} else if (rdev->smc_fw->size != smc_req_size) {
			pr_err("smc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->smc_fw->size, fw_name);
			err = -EINVAL;
		}
	}

out:
	if (err) {
		if (err != -EINVAL)
			pr_err("ni_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
	}
	return err;
}

/**
 * cayman_get_allowed_info_register - fetch the register for the info ioctl
 *
 * @rdev: radeon_device pointer
 * @reg: register offset in bytes
 * @val: register value
 *
 * Returns 0 for success or -EINVAL for an invalid register
 *
 */
int cayman_get_allowed_info_register(struct radeon_device *rdev,
				     u32 reg, u32 *val)
{
	switch (reg) {
	case GRBM_STATUS:
	case GRBM_STATUS_SE0:
	case GRBM_STATUS_SE1:
	case SRBM_STATUS:
	case SRBM_STATUS2:
	case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
	case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
	case UVD_STATUS:
		*val = RREG32(reg);
		return 0;
	default:
		return -EINVAL;
	}
}

int tn_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
	int actual_temp = (temp / 8) - 49;

	return actual_temp * 1000;
}
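
/*
 * TN_CURRENT_GNB_TEMP holds an 11-bit raw value in units of 1/8 degree
 * with a -49 C offset; tn_get_temp() above returns millidegrees C.
 * Worked example: a raw reading of 584 gives (584 / 8) - 49 = 24 C,
 * returned as 24000.
 */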

/*
 * Core functions
 */
static void cayman_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 cgts_tcc_disable;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 cgts_sm_ctrl_reg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	u32 disabled_rb_mask;
	int i, j;

	switch (rdev->family) {
	case CHIP_CAYMAN:
		rdev->config.cayman.max_shader_engines = 2;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 8;
		rdev->config.cayman.max_simds_per_se = 12;
		rdev->config.cayman.max_backends_per_se = 4;
		rdev->config.cayman.max_texture_channel_caches = 8;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x100;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_ARUBA:
	default:
		rdev->config.cayman.max_shader_engines = 1;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 2;
		if ((rdev->pdev->device == 0x9900) ||
		    (rdev->pdev->device == 0x9901) ||
		    (rdev->pdev->device == 0x9905) ||
		    (rdev->pdev->device == 0x9906) ||
		    (rdev->pdev->device == 0x9907) ||
		    (rdev->pdev->device == 0x9908) ||
		    (rdev->pdev->device == 0x9909) ||
		    (rdev->pdev->device == 0x990B) ||
		    (rdev->pdev->device == 0x990C) ||
		    (rdev->pdev->device == 0x990F) ||
		    (rdev->pdev->device == 0x9910) ||
		    (rdev->pdev->device == 0x9917) ||
		    (rdev->pdev->device == 0x9999) ||
		    (rdev->pdev->device == 0x999C)) {
			rdev->config.cayman.max_simds_per_se = 6;
			rdev->config.cayman.max_backends_per_se = 2;
			rdev->config.cayman.max_hw_contexts = 8;
			rdev->config.cayman.sx_max_export_size = 256;
			rdev->config.cayman.sx_max_export_pos_size = 64;
			rdev->config.cayman.sx_max_export_smx_size = 192;
		} else if ((rdev->pdev->device == 0x9903) ||
			   (rdev->pdev->device == 0x9904) ||
			   (rdev->pdev->device == 0x990A) ||
			   (rdev->pdev->device == 0x990D) ||
			   (rdev->pdev->device == 0x990E) ||
			   (rdev->pdev->device == 0x9913) ||
			   (rdev->pdev->device == 0x9918) ||
			   (rdev->pdev->device == 0x999D)) {
			rdev->config.cayman.max_simds_per_se = 4;
			rdev->config.cayman.max_backends_per_se = 2;
			rdev->config.cayman.max_hw_contexts = 8;
			rdev->config.cayman.sx_max_export_size = 256;
			rdev->config.cayman.sx_max_export_pos_size = 64;
			rdev->config.cayman.sx_max_export_smx_size = 192;
		} else if ((rdev->pdev->device == 0x9919) ||
			   (rdev->pdev->device == 0x9990) ||
			   (rdev->pdev->device == 0x9991) ||
			   (rdev->pdev->device == 0x9994) ||
			   (rdev->pdev->device == 0x9995) ||
			   (rdev->pdev->device == 0x9996) ||
			   (rdev->pdev->device == 0x999A) ||
			   (rdev->pdev->device == 0x99A0)) {
			rdev->config.cayman.max_simds_per_se = 3;
			rdev->config.cayman.max_backends_per_se = 1;
			rdev->config.cayman.max_hw_contexts = 4;
			rdev->config.cayman.sx_max_export_size = 128;
			rdev->config.cayman.sx_max_export_pos_size = 32;
			rdev->config.cayman.sx_max_export_smx_size = 96;
		} else {
			rdev->config.cayman.max_simds_per_se = 2;
			rdev->config.cayman.max_backends_per_se = 1;
			rdev->config.cayman.max_hw_contexts = 4;
			rdev->config.cayman.sx_max_export_size = 128;
			rdev->config.cayman.sx_max_export_pos_size = 32;
			rdev->config.cayman.sx_max_export_smx_size = 96;
		}
		rdev->config.cayman.max_texture_channel_caches = 2;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x40;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
	WREG32(SRBM_INT_CNTL, 0x1);
	WREG32(SRBM_INT_ACK, 0x1);

	evergreen_fix_pci_max_read_req_size(rdev);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cayman.mem_row_size_in_kb > 4)
		rdev->config.cayman.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cayman.shader_engine_tile_size = 32;
	rdev->config.cayman.num_gpus = 1;
	rdev->config.cayman.multi_gpu_tile_size = 64;

	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	rdev->config.cayman.num_tile_pipes = (1 << tmp);
	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
	rdev->config.cayman.num_shader_engines = tmp + 1;
	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
	rdev->config.cayman.num_gpus = tmp + 1;
	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;


	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cayman.tile_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		rdev->config.cayman.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cayman.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cayman.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.cayman.tile_config |= (3 << 0);
		break;
	}

	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.cayman.tile_config |= 1 << 4;
	else {
		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
		case 0: /* four banks */
			rdev->config.cayman.tile_config |= 0 << 4;
			break;
		case 1: /* eight banks */
			rdev->config.cayman.tile_config |= 1 << 4;
			break;
		case 2: /* sixteen banks */
		default:
			rdev->config.cayman.tile_config |= 2 << 4;
			break;
		}
	}
	rdev->config.cayman.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
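	/*
	 * Example under the layout documented above: 8 tile pipes and
	 * 8 banks encode as (3 << 0) | (1 << 4) = 0x13 in the low byte,
	 * with group_size and row_size copied in from gb_addr_config.
	 */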

	tmp = 0;
	for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
		u32 rb_disable_bitmap;

		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
		tmp <<= 4;
		tmp |= rb_disable_bitmap;
	}
	/* enabled RBs are just the ones not disabled :) */
	disabled_rb_mask = tmp;
	tmp = 0;
	for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
		tmp |= (1 << i);
	/* if all the backends are disabled, fix it up here */
	if ((disabled_rb_mask & tmp) == tmp) {
		for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
			disabled_rb_mask &= ~(1 << i);
	}

	for (i = 0; i < rdev->config.cayman.max_shader_engines; i++) {
		u32 simd_disable_bitmap;

		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
		simd_disable_bitmap |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
		tmp <<= 16;
		tmp |= simd_disable_bitmap;
	}
	rdev->config.cayman.active_simds = hweight32(~tmp);

	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);

	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	if (ASIC_IS_DCE6(rdev))
		WREG32(DMIF_ADDR_CALC, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
	WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);

	if ((rdev->config.cayman.max_backends_per_se == 1) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		if ((disabled_rb_mask & 3) == 2) {
			/* RB1 disabled, RB0 enabled */
			tmp = 0x00000000;
		} else {
			/* RB0 disabled, RB1 enabled */
			tmp = 0x11111111;
		}
	} else {
		tmp = gb_addr_config & NUM_PIPES_MASK;
		tmp = r6xx_remap_render_backend(rdev, tmp,
						rdev->config.cayman.max_backends_per_se *
						rdev->config.cayman.max_shader_engines,
						CAYMAN_MAX_BACKENDS, disabled_rb_mask);
	}
	rdev->config.cayman.backend_map = tmp;
	WREG32(GB_BACKEND_MAP, tmp);

	cgts_tcc_disable = 0xffff0000;
	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
		cgts_tcc_disable &= ~(1 << (16 + i));
	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

	/* reprogram the shader complex */
	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
	for (i = 0; i < 16; i++)
		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

	/* need to be explicitly zero-ed */
	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
	WREG32(SQ_LSTMP_RING_BASE, 0);
	WREG32(SQ_HSTMP_RING_BASE, 0);
	WREG32(SQ_ESTMP_RING_BASE, 0);
	WREG32(SQ_GSTMP_RING_BASE, 0);
	WREG32(SQ_VSTMP_RING_BASE, 0);
	WREG32(SQ_PSTMP_RING_BASE, 0);

	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));


	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
	WREG32(SQ_CONFIG, (VC_ENABLE |
			   EXPORT_SRC_C |
			   GFX_PRIO(0) |
			   CS1_PRIO(0) |
			   CS2_PRIO(1)));
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);

	/* set clockgating golden values on TN */
	if (rdev->family == CHIP_ARUBA) {
		tmp = RREG32_CG(CG_CGTT_LOCAL_0);
		tmp &= ~0x00380000;
		WREG32_CG(CG_CGTT_LOCAL_0, tmp);
		tmp = RREG32_CG(CG_CGTT_LOCAL_1);
		tmp &= ~0x0e000000;
		WREG32_CG(CG_CGTT_LOCAL_1, tmp);
	}
}

/*
 * GART
 */
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}

static int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int i, r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       BANK_SELECT(6) |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-7 */
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < 8; i++) {
		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
			rdev->vm_manager.max_pfn - 1);
		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
		       rdev->vm_manager.saved_table_addr[i]);
	}

	/* enable context1-7 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
				PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	cayman_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
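
/*
 * VM context 0 above backs the GART (single-level page table,
 * PAGE_TABLE_DEPTH(0), faults redirected to the dummy page); contexts 1-7
 * are the per-process VMs with one level of PDEs (PAGE_TABLE_DEPTH(1)).
 * The block fragment size tracks the radeon_vm_block_size module
 * parameter: e.g. a 9-bit block size yields PAGE_TABLE_BLOCK_SIZE(0).
 */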

static void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 1; i < 8; ++i) {
		rdev->vm_manager.saved_table_addr[i] = RREG32(
			VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2));
	}

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	radeon_gart_table_vram_unpin(rdev);
}

static void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
			      int ring, u32 cp_int_cntl)
{
	WREG32(SRBM_GFX_CNTL, RINGID(ring));
	WREG32(CP_INT_CNTL, cp_int_cntl);
}

/*
 * CP.
 */
void cayman_fence_ring_emit(struct radeon_device *rdev,
			    struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
		PACKET3_SH_ACTION_ENA;

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
	radeon_ring_write(ring, lower_32_bits(addr));
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}
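
/*
 * The EOP packet above writes the 32-bit fence sequence number to the
 * fence driver's GPU address and raises an interrupt once the write
 * lands: per the conventions used in the other radeon ring code,
 * DATA_SEL(1) selects a 32-bit data write and INT_SEL(2) requests the
 * interrupt on write confirmation.
 */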

void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
		PACKET3_SH_ACTION_ENA;

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);

	if (ring->rptr_save_reg) {
		uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					  PACKET3_SET_CONFIG_REG_START) >> 2));
		radeon_ring_write(ring, next_rptr);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw | (vm_id << 24));

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */
}

static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
			radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
		WREG32(SCRATCH_UMSK, 0);
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	}
}

u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
			struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	else {
		if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
			rptr = RREG32(CP_RB0_RPTR);
		else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
			rptr = RREG32(CP_RB1_RPTR);
		else
			rptr = RREG32(CP_RB2_RPTR);
	}

	return rptr;
}

u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
			struct radeon_ring *ring)
{
	u32 wptr;

	if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
		wptr = RREG32(CP_RB0_WPTR);
	else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
		wptr = RREG32(CP_RB1_WPTR);
	else
		wptr = RREG32(CP_RB2_WPTR);

	return wptr;
}

void cayman_gfx_set_wptr(struct radeon_device *rdev,
			 struct radeon_ring *ring)
{
	if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
		WREG32(CP_RB0_WPTR, ring->wptr);
		(void)RREG32(CP_RB0_WPTR);
	} else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) {
		WREG32(CP_RB1_WPTR, ring->wptr);
		(void)RREG32(CP_RB1_WPTR);
	} else {
		WREG32(CP_RB2_WPTR, ring->wptr);
		(void)RREG32(CP_RB2_WPTR);
	}
}
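
/*
 * The throwaway RREG32() after each WPTR write above posts the MMIO
 * write, making sure the CP sees the new write pointer before the
 * caller proceeds.
 */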

static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	cayman_cp_enable(rdev, false);

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
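
/*
 * The PFP/ME images ship as big-endian 32-bit words, hence the __be32
 * pointers and be32_to_cpup() above.  Only the start address is
 * programmed because the ucode data ports autoincrement; the final
 * writes of 0 to the ADDR/WADDR/RADDR registers reset the pointers to
 * the start of each ucode RAM.
 */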

static int cayman_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring, false);

	cayman_cp_enable(rdev, true);

	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(ring, cayman_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	radeon_ring_unlock_commit(rdev, ring, false);

	/* XXX init other rings */

	return 0;
}

static void cayman_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}

static int cayman_cp_resume(struct radeon_device *rdev)
{
	static const int ridx[] = {
		RADEON_RING_TYPE_GFX_INDEX,
		CAYMAN_RING_TYPE_CP1_INDEX,
		CAYMAN_RING_TYPE_CP2_INDEX
	};
	static const unsigned cp_rb_cntl[] = {
		CP_RB0_CNTL,
		CP_RB1_CNTL,
		CP_RB2_CNTL,
	};
	static const unsigned cp_rb_rptr_addr[] = {
		CP_RB0_RPTR_ADDR,
		CP_RB1_RPTR_ADDR,
		CP_RB2_RPTR_ADDR
	};
	static const unsigned cp_rb_rptr_addr_hi[] = {
		CP_RB0_RPTR_ADDR_HI,
		CP_RB1_RPTR_ADDR_HI,
		CP_RB2_RPTR_ADDR_HI
	};
	static const unsigned cp_rb_base[] = {
		CP_RB0_BASE,
		CP_RB1_BASE,
		CP_RB2_BASE
	};
	static const unsigned cp_rb_rptr[] = {
		CP_RB0_RPTR,
		CP_RB1_RPTR,
		CP_RB2_RPTR
	};
	static const unsigned cp_rb_wptr[] = {
		CP_RB0_WPTR,
		CP_RB1_WPTR,
		CP_RB2_WPTR
	};
	struct radeon_ring *ring;
	int i, r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* set the wb address whether it's enabled or not */
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
	WREG32(SCRATCH_UMSK, 0xff);

	for (i = 0; i < 3; ++i) {
		uint32_t rb_cntl;
		uint64_t addr;

		/* Set ring buffer size */
		ring = &rdev->ring[ridx[i]];
		rb_cntl = order_base_2(ring->ring_size / 8);
		rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN
		rb_cntl |= BUF_SWAP_32BIT;
#endif
		WREG32(cp_rb_cntl[i], rb_cntl);

		/* set the wb address whether it's enabled or not */
		addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
		WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
		WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
	}
1706 
1707 	/* set the rb base addr, this causes an internal reset of ALL rings */
1708 	for (i = 0; i < 3; ++i) {
1709 		ring = &rdev->ring[ridx[i]];
1710 		WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
1711 	}
1712 
1713 	for (i = 0; i < 3; ++i) {
1714 		/* Initialize the ring buffer's read and write pointers */
1715 		ring = &rdev->ring[ridx[i]];
1716 		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
1717 
1718 		ring->wptr = 0;
1719 		WREG32(cp_rb_rptr[i], 0);
1720 		WREG32(cp_rb_wptr[i], ring->wptr);
1721 
1722 		mdelay(1);
1723 		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
1724 	}
1725 
1726 	/* start the rings */
1727 	cayman_cp_start(rdev);
1728 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
1729 	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1730 	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1731 	/* this only test cp0 */
1732 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1733 	if (r) {
1734 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1735 		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1736 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1737 		return r;
1738 	}
1739 
1740 	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
1741 		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
1742 
1743 	return 0;
1744 }
1745 
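/**
 * cayman_gpu_check_soft_reset - poll the busy bits of the major GPU blocks
 *
 * @rdev: radeon_device pointer
 *
 * Read the GRBM, SRBM, DMA and VM status registers and translate any
 * busy/pending bits into a RADEON_RESET_* mask describing which blocks
 * would need a soft reset.
 * Returns the accumulated reset mask (0 if everything is idle).
 */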
u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(GRBM_STATUS);
	if (tmp & (PA_BUSY | SC_BUSY |
		   SH_BUSY | SX_BUSY |
		   TA_BUSY | VGT_BUSY |
		   DB_BUSY | CB_BUSY |
		   GDS_BUSY | SPI_BUSY |
		   IA_BUSY | IA_BUSY_NO_DMA))
		reset_mask |= RADEON_RESET_GFX;

	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
		   CP_BUSY | CP_COHERENCY_BUSY))
		reset_mask |= RADEON_RESET_CP;

	if (tmp & GRBM_EE_BUSY)
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* DMA_STATUS_REG 0 */
	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* DMA_STATUS_REG 1 */
	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS2 */
	tmp = RREG32(SRBM_STATUS2);
	if (tmp & DMA_BUSY)
		reset_mask |= RADEON_RESET_DMA;

	if (tmp & DMA1_BUSY)
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS */
	tmp = RREG32(SRBM_STATUS);
	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
		reset_mask |= RADEON_RESET_RLC;

	if (tmp & IH_BUSY)
		reset_mask |= RADEON_RESET_IH;

	if (tmp & SEM_BUSY)
		reset_mask |= RADEON_RESET_SEM;

	if (tmp & GRBM_RQ_PENDING)
		reset_mask |= RADEON_RESET_GRBM;

	if (tmp & VMC_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
		   MCC_BUSY | MCD_BUSY))
		reset_mask |= RADEON_RESET_MC;

	if (evergreen_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* VM_L2_STATUS */
	tmp = RREG32(VM_L2_STATUS);
	if (tmp & L2_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}

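/**
 * cayman_gpu_soft_reset - soft reset the blocks selected by the mask
 *
 * @rdev: radeon_device pointer
 * @reset_mask: RADEON_RESET_* mask of blocks to reset
 *
 * Halt the CP and DMA engines, stop the MC, then pulse the matching
 * GRBM/SRBM soft reset bits and restore the MC afterwards.
 */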
static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct evergreen_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	evergreen_print_gpu_status_regs(rdev);
	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(0x14F8));
	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14D8));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(0x14FC));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14DC));

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	if (reset_mask & RADEON_RESET_DMA) {
		/* dma0 */
		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
	}

	if (reset_mask & RADEON_RESET_DMA1) {
		/* dma1 */
		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
	}

	udelay(50);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
		grbm_soft_reset = SOFT_RESET_CB |
			SOFT_RESET_DB |
			SOFT_RESET_GDS |
			SOFT_RESET_PA |
			SOFT_RESET_SC |
			SOFT_RESET_SPI |
			SOFT_RESET_SH |
			SOFT_RESET_SX |
			SOFT_RESET_TC |
			SOFT_RESET_TA |
			SOFT_RESET_VGT |
			SOFT_RESET_IA;
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;

		srbm_soft_reset |= SOFT_RESET_GRBM;
	}

	if (reset_mask & RADEON_RESET_DMA)
		srbm_soft_reset |= SOFT_RESET_DMA;

	if (reset_mask & RADEON_RESET_DMA1)
		srbm_soft_reset |= SOFT_RESET_DMA1;

	if (reset_mask & RADEON_RESET_DISPLAY)
		srbm_soft_reset |= SOFT_RESET_DC;

	if (reset_mask & RADEON_RESET_RLC)
		srbm_soft_reset |= SOFT_RESET_RLC;

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= SOFT_RESET_SEM;

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= SOFT_RESET_IH;

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= SOFT_RESET_GRBM;

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= SOFT_RESET_VMC;

	if (!(rdev->flags & RADEON_IS_IGP)) {
		if (reset_mask & RADEON_RESET_MC)
			srbm_soft_reset |= SOFT_RESET_MC;
	}

	if (grbm_soft_reset) {
		tmp = RREG32(GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
	udelay(50);

	evergreen_print_gpu_status_regs(rdev);
}

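/**
 * cayman_asic_reset - attempt to recover a hung GPU
 *
 * @rdev: radeon_device pointer
 * @hard: use a PCI config reset instead of a soft reset
 *
 * Try a soft reset of the blocks reported busy; if some blocks are
 * still hung afterwards, fall back to a PCI config reset.
 * Returns 0.
 */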
int cayman_asic_reset(struct radeon_device *rdev, bool hard)
{
	u32 reset_mask;

	if (hard) {
		evergreen_gpu_pci_config_reset(rdev);
		return 0;
	}

	reset_mask = cayman_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	cayman_gpu_soft_reset(rdev, reset_mask);

	reset_mask = cayman_gpu_check_soft_reset(rdev);

	if (reset_mask)
		evergreen_gpu_pci_config_reset(rdev);

	r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}

/**
 * cayman_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = cayman_gpu_check_soft_reset(rdev);

	if (!(reset_mask & (RADEON_RESET_GFX |
			    RADEON_RESET_COMPUTE |
			    RADEON_RESET_CP))) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

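/**
 * cayman_uvd_init - set up the UVD block
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the UVD code and the UVD ring; on failure UVD is
 * disabled for the rest of the driver's lifetime.
 */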
static void cayman_uvd_init(struct radeon_device *rdev)
{
	int r;

	if (!rdev->has_uvd)
		return;

	r = radeon_uvd_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
		/*
		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
		 * uvd_v2_2_resume() fail early, so nothing would happen
		 * there anyway.  Rather than pointlessly going through
		 * that code, disable UVD here.
		 */
		rdev->has_uvd = false;
		return;
	}
	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
}

static void cayman_uvd_start(struct radeon_device *rdev)
{
	int r;

	if (!rdev->has_uvd)
		return;

	r = uvd_v2_2_resume(rdev);
	if (r) {
		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
		goto error;
	}
	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
		goto error;
	}
	return;

error:
	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
}

static void cayman_uvd_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
		return;

	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
	if (r) {
		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
		return;
	}
	r = uvd_v1_0_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
		return;
	}
}

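/**
 * cayman_vce_init - set up the VCE block
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the VCE code and both VCE rings; on failure VCE is
 * disabled for the rest of the driver's lifetime.
 */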
static void cayman_vce_init(struct radeon_device *rdev)
{
	int r;

	/* Only set for CHIP_ARUBA */
	if (!rdev->has_vce)
		return;

	r = radeon_vce_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
		/*
		 * At this point rdev->vce.vcpu_bo is NULL, which makes
		 * cayman_vce_start() fail early, so nothing would happen
		 * there anyway.  Rather than pointlessly going through
		 * that code, disable VCE here.
		 */
		rdev->has_vce = false;
		return;
	}
	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
}

static void cayman_vce_start(struct radeon_device *rdev)
{
	int r;

	if (!rdev->has_vce)
		return;

	r = radeon_vce_resume(rdev);
	if (r) {
		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
		goto error;
	}
	r = vce_v1_0_resume(rdev);
	if (r) {
		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
		goto error;
	}
	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
		goto error;
	}
	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
		goto error;
	}
	return;

error:
	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
}

static void cayman_vce_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
		return;

	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
	if (r) {
		dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
		return;
	}
	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
	if (r) {
		dev_err(rdev->dev, "failed initializing VCE2 ring (%d).\n", r);
		return;
	}
	r = vce_v1_0_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
		return;
	}
}

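/**
 * cayman_startup - bring the asic to a working state
 *
 * @rdev: radeon_device pointer
 *
 * Program the MC, GART and GPU golden state, load the CP/MC microcode,
 * start the fence drivers, interrupts, CP/DMA/UVD/VCE rings and the
 * IB pool, VM manager and audio.  Used by both init and resume.
 * Returns 0 on success, negative error code on failure.
 */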
static int cayman_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);
	/* enable aspm */
	evergreen_program_aspm(rdev);

	/* scratch needs to be initialized before MC */
	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);

	if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = cayman_pcie_gart_enable(rdev);
	if (r)
		return r;
	cayman_gpu_init(rdev);

	/* allocate rlc buffers */
	if (rdev->flags & RADEON_IS_IGP) {
		rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
		rdev->rlc.reg_list_size =
			(u32)ARRAY_SIZE(tn_rlc_save_restore_register_list);
		rdev->rlc.cs_data = cayman_cs_data;
		r = sumo_rlc_init(rdev);
		if (r) {
			DRM_ERROR("Failed to init rlc BOs!\n");
			return r;
		}
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	cayman_uvd_start(rdev);
	cayman_vce_start(rdev);

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	r = cayman_cp_load_microcode(rdev);
	if (r)
		return r;
	r = cayman_cp_resume(rdev);
	if (r)
		return r;

	r = cayman_dma_resume(rdev);
	if (r)
		return r;

	cayman_uvd_resume(rdev);
	cayman_vce_resume(rdev);

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_audio_init(rdev);
	if (r)
		return r;

	return 0;
}

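/**
 * cayman_resume - resume the asic after suspend
 *
 * @rdev: radeon_device pointer
 *
 * Re-post the card, restore the golden registers and run the common
 * startup sequence again.
 * Returns 0 on success, negative error code on failure.
 */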
int cayman_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting: unlike r500 hardware,
	 * posting on rv770 and newer performs the tasks needed to bring
	 * the GPU back into a good state.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	/* init golden registers */
	ni_init_golden_registers(rdev);

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_resume(rdev);

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		DRM_ERROR("cayman startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}
	return r;
}

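/**
 * cayman_suspend - shut the asic down for suspend
 *
 * @rdev: radeon_device pointer
 *
 * Stop power management, audio, the VM manager, the CP/DMA engines,
 * UVD, interrupts, writeback and the GART.
 * Returns 0.
 */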
int cayman_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	radeon_audio_fini(rdev);
	radeon_vm_manager_fini(rdev);
	cayman_cp_enable(rdev, false);
	cayman_dma_stop(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_suspend(rdev);
	}
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	cayman_pcie_gart_disable(rdev);
	return 0;
}

/* Plan is to move initialization in that function and use
 * helper functions so that radeon_device_init pretty much
 * does nothing more than call asic specific functions. This
 * should also allow removing a bunch of callback functions
 * like vram_info.
 */
int cayman_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* init golden registers */
	ni_init_golden_registers(rdev);
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_IGP) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	}

	/* Initialize power management */
	radeon_pm_init(rdev);

	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	cayman_uvd_init(rdev);
	cayman_vce_init(rdev);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		cayman_cp_fini(rdev);
		cayman_dma_fini(rdev);
		r600_irq_fini(rdev);
		if (rdev->flags & RADEON_IS_IGP)
			sumo_rlc_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		cayman_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 *
	 * We can skip this check for TN, because there is no MC
	 * ucode.
	 */
	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}

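/**
 * cayman_fini - tear the asic down
 *
 * @rdev: radeon_device pointer
 *
 * Undo everything cayman_init() set up, in reverse order: rings,
 * interrupts, UVD/VCE, GART, memory manager, fences and the BIOS copy.
 */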
void cayman_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	cayman_cp_fini(rdev);
	cayman_dma_fini(rdev);
	r600_irq_fini(rdev);
	if (rdev->flags & RADEON_IS_IGP)
		sumo_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	uvd_v1_0_fini(rdev);
	radeon_uvd_fini(rdev);
	if (rdev->has_vce)
		radeon_vce_fini(rdev);
	cayman_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/*
 * vm
 */
int cayman_vm_init(struct radeon_device *rdev)
{
	/* number of VMs */
	rdev->vm_manager.nvm = 8;
	/* base offset of vram pages */
	if (rdev->flags & RADEON_IS_IGP) {
		u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
		tmp <<= 22;
		rdev->vm_manager.vram_base_offset = tmp;
	} else
		rdev->vm_manager.vram_base_offset = 0;
	return 0;
}

void cayman_vm_fini(struct radeon_device *rdev)
{
}

/**
 * cayman_vm_decode_fault - print human readable fault info
 *
 * @rdev: radeon_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 *
 * Print human readable fault information (cayman/TN).
 */
void cayman_vm_decode_fault(struct radeon_device *rdev,
			    u32 status, u32 addr)
{
	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
	char *block;

	switch (mc_id) {
	case 32:
	case 16:
	case 96:
	case 80:
	case 160:
	case 144:
	case 224:
	case 208:
		block = "CB";
		break;
	case 33:
	case 17:
	case 97:
	case 81:
	case 161:
	case 145:
	case 225:
	case 209:
		block = "CB_FMASK";
		break;
	case 34:
	case 18:
	case 98:
	case 82:
	case 162:
	case 146:
	case 226:
	case 210:
		block = "CB_CMASK";
		break;
	case 35:
	case 19:
	case 99:
	case 83:
	case 163:
	case 147:
	case 227:
	case 211:
		block = "CB_IMMED";
		break;
	case 36:
	case 20:
	case 100:
	case 84:
	case 164:
	case 148:
	case 228:
	case 212:
		block = "DB";
		break;
	case 37:
	case 21:
	case 101:
	case 85:
	case 165:
	case 149:
	case 229:
	case 213:
		block = "DB_HTILE";
		break;
	case 38:
	case 22:
	case 102:
	case 86:
	case 166:
	case 150:
	case 230:
	case 214:
		block = "SX";
		break;
	case 39:
	case 23:
	case 103:
	case 87:
	case 167:
	case 151:
	case 231:
	case 215:
		block = "DB_STEN";
		break;
	case 40:
	case 24:
	case 104:
	case 88:
	case 232:
	case 216:
	case 168:
	case 152:
		block = "TC_TFETCH";
		break;
	case 41:
	case 25:
	case 105:
	case 89:
	case 233:
	case 217:
	case 169:
	case 153:
		block = "TC_VFETCH";
		break;
	case 42:
	case 26:
	case 106:
	case 90:
	case 234:
	case 218:
	case 170:
	case 154:
		block = "VC";
		break;
	case 112:
		block = "CP";
		break;
	case 113:
	case 114:
		block = "SH";
		break;
	case 115:
		block = "VGT";
		break;
	case 178:
		block = "IH";
		break;
	case 51:
		block = "RLC";
		break;
	case 55:
		block = "DMA";
		break;
	case 56:
		block = "HDP";
		break;
	default:
		block = "unknown";
		break;
	}

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
	       protections, vmid, addr,
	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
	       block, mc_id);
}

/**
 * cayman_vm_flush - vm flush using the CP
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 * @vm_id: vmid number to use
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using the CP (cayman-si).
 */
void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		     unsigned vm_id, uint64_t pd_addr)
{
	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0));
	radeon_ring_write(ring, pd_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	radeon_ring_write(ring, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
	radeon_ring_write(ring, 1 << vm_id);

	/* wait for the invalidate to complete */
	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
				 WAIT_REG_MEM_ENGINE(0))); /* me */
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0); /* ref */
	radeon_ring_write(ring, 0); /* mask */
	radeon_ring_write(ring, 0x20); /* poll interval */

	/* sync PFP to ME, otherwise we might get invalid PFP reads */
	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
	radeon_ring_write(ring, 0x0);
}

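/**
 * tn_set_vce_clocks - set the VCE engine clock (TN)
 *
 * @rdev: radeon_device pointer
 * @evclk: requested evclk (unused in this implementation)
 * @ecclk: requested ecclk
 *
 * Look up the dividers for the requested ecclk, wait for the ECLK
 * status to settle, then program the new post divider and wait for
 * the clock to stabilize again.
 * Returns 0 on success, -ETIMEDOUT if the clock never stabilizes.
 */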
int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
{
	struct atom_clock_dividers dividers;
	int r, i;

	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					   ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	WREG32_P(CG_ECLK_CNTL, dividers.post_div, ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK));

	for (i = 0; i < 100; i++) {
		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
2752