1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28 #include <drm/drmP.h>
29 #include "amdgpu.h"
30 #include <drm/amdgpu_drm.h>
31 #include "amdgpu_uvd.h"
32 #include "amdgpu_vce.h"
33
34 #include <linux/vga_switcheroo.h>
35 #include <linux/slab.h>
36 #include <linux/pm_runtime.h>
37 #include "amdgpu_amdkfd.h"
38
39 /**
40 * amdgpu_driver_unload_kms - Main unload function for KMS.
41 *
42 * @dev: drm dev pointer
43 *
44 * This is the main unload function for KMS (all asics).
45 * It tears down the device via amdgpu_device_fini().
46 */
47 void amdgpu_driver_unload_kms(struct drm_device *dev)
48 {
49 struct amdgpu_device *adev = dev->dev_private;
50
51 if (adev == NULL)
52 return;
53
54 if (adev->rmmio == NULL)
55 goto done_free;
56
57 if (amdgpu_sriov_vf(adev))
58 amdgpu_virt_request_full_gpu(adev, false);
59
60 if (amdgpu_device_is_px(dev)) {
61 pm_runtime_get_sync(dev->dev);
62 pm_runtime_forbid(dev->dev);
63 }
64
65 amdgpu_amdkfd_device_fini(adev);
66
67 amdgpu_acpi_fini(adev);
68
69 amdgpu_device_fini(adev);
70
71 done_free:
72 kfree(adev);
73 dev->dev_private = NULL;
74 }
75
76 /**
77 * amdgpu_driver_load_kms - Main load function for KMS.
78 *
79 * @dev: drm dev pointer
80 * @flags: device flags
81 *
82 * This is the main load function for KMS (all asics).
83 * Returns 0 on success, error on failure.
84 */
85 int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
86 {
87 struct amdgpu_device *adev;
88 int r, acpi_status;
89
90 adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
91 if (adev == NULL) {
92 return -ENOMEM;
93 }
94 dev->dev_private = (void *)adev;
95
96 if ((amdgpu_runtime_pm != 0) &&
97 amdgpu_has_atpx() &&
98 (amdgpu_is_atpx_hybrid() ||
99 amdgpu_has_atpx_dgpu_power_cntl()) &&
100 ((flags & AMD_IS_APU) == 0) &&
101 !pci_is_thunderbolt_attached(dev->pdev))
102 flags |= AMD_IS_PX;
103
104 /* amdgpu_device_init should report only fatal errors
105 * (e.g. memory allocation, iomapping or memory manager
106 * initialization failures). On success it must have
107 * initialized the GPU MC controller properly so that
108 * VRAM allocation works afterwards.
109 */
110 r = amdgpu_device_init(adev, dev, dev->pdev, flags);
111 if (r) {
112 dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
113 goto out;
114 }
115
116 /* Call ACPI methods: these require modeset init,
117 * but a failure here is not fatal
118 */
119 if (!r) {
120 acpi_status = amdgpu_acpi_init(adev);
121 if (acpi_status)
122 dev_dbg(&dev->pdev->dev,
123 "Error during ACPI methods call\n");
124 }
125
126 amdgpu_amdkfd_device_probe(adev);
127 amdgpu_amdkfd_device_init(adev);
128
129 if (amdgpu_device_is_px(dev)) {
130 pm_runtime_use_autosuspend(dev->dev);
131 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
132 pm_runtime_set_active(dev->dev);
133 pm_runtime_allow(dev->dev);
134 pm_runtime_mark_last_busy(dev->dev);
135 pm_runtime_put_autosuspend(dev->dev);
136 }
137
138 if (amdgpu_sriov_vf(adev))
139 amdgpu_virt_release_full_gpu(adev, true);
140
141 out:
142 if (r) {
143 /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
144 if (adev->rmmio && amdgpu_device_is_px(dev))
145 pm_runtime_put_noidle(dev->dev);
146 amdgpu_driver_unload_kms(dev);
147 }
148
149 return r;
150 }
151
152 static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
153 struct drm_amdgpu_query_fw *query_fw,
154 struct amdgpu_device *adev)
155 {
156 switch (query_fw->fw_type) {
157 case AMDGPU_INFO_FW_VCE:
158 fw_info->ver = adev->vce.fw_version;
159 fw_info->feature = adev->vce.fb_version;
160 break;
161 case AMDGPU_INFO_FW_UVD:
162 fw_info->ver = adev->uvd.fw_version;
163 fw_info->feature = 0;
164 break;
165 case AMDGPU_INFO_FW_GMC:
166 fw_info->ver = adev->mc.fw_version;
167 fw_info->feature = 0;
168 break;
169 case AMDGPU_INFO_FW_GFX_ME:
170 fw_info->ver = adev->gfx.me_fw_version;
171 fw_info->feature = adev->gfx.me_feature_version;
172 break;
173 case AMDGPU_INFO_FW_GFX_PFP:
174 fw_info->ver = adev->gfx.pfp_fw_version;
175 fw_info->feature = adev->gfx.pfp_feature_version;
176 break;
177 case AMDGPU_INFO_FW_GFX_CE:
178 fw_info->ver = adev->gfx.ce_fw_version;
179 fw_info->feature = adev->gfx.ce_feature_version;
180 break;
181 case AMDGPU_INFO_FW_GFX_RLC:
182 fw_info->ver = adev->gfx.rlc_fw_version;
183 fw_info->feature = adev->gfx.rlc_feature_version;
184 break;
185 case AMDGPU_INFO_FW_GFX_MEC:
186 if (query_fw->index == 0) {
187 fw_info->ver = adev->gfx.mec_fw_version;
188 fw_info->feature = adev->gfx.mec_feature_version;
189 } else if (query_fw->index == 1) {
190 fw_info->ver = adev->gfx.mec2_fw_version;
191 fw_info->feature = adev->gfx.mec2_feature_version;
192 } else
193 return -EINVAL;
194 break;
195 case AMDGPU_INFO_FW_SMC:
196 fw_info->ver = adev->pm.fw_version;
197 fw_info->feature = 0;
198 break;
199 case AMDGPU_INFO_FW_SDMA:
200 if (query_fw->index >= adev->sdma.num_instances)
201 return -EINVAL;
202 fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
203 fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
204 break;
205 case AMDGPU_INFO_FW_SOS:
206 fw_info->ver = adev->psp.sos_fw_version;
207 fw_info->feature = adev->psp.sos_feature_version;
208 break;
209 case AMDGPU_INFO_FW_ASD:
210 fw_info->ver = adev->psp.asd_fw_version;
211 fw_info->feature = adev->psp.asd_feature_version;
212 break;
213 default:
214 return -EINVAL;
215 }
216 return 0;
217 }
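/*
 * Illustrative only (not part of the driver): callers fill in a struct
 * drm_amdgpu_query_fw before handing it to amdgpu_firmware_info(). For
 * example, querying the MEC2 microcode version (index 0 selects MEC,
 * index 1 selects MEC2) might look like:
 *
 *	struct drm_amdgpu_query_fw query_fw = {};
 *	struct drm_amdgpu_info_firmware fw_info;
 *
 *	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
 *	query_fw.index = 1;
 *	amdgpu_firmware_info(&fw_info, &query_fw, adev);
 */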
218
219 /*
220 * Userspace get information ioctl
221 */
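/*
 * A minimal userspace sketch (illustrative only, assuming libdrm and the
 * amdgpu_drm.h UAPI header): querying AMDGPU_INFO_ACCEL_WORKING through
 * this ioctl could look roughly like:
 *
 *	struct drm_amdgpu_info request = {};
 *	uint32_t accel_working = 0;
 *
 *	request.return_pointer = (uintptr_t)&accel_working;
 *	request.return_size = sizeof(accel_working);
 *	request.query = AMDGPU_INFO_ACCEL_WORKING;
 *	drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
 */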
222 /**
223 * amdgpu_info_ioctl - answer a device specific request.
224 *
225 * @dev: drm device pointer
226 * @data: request object
227 * @filp: drm filp
228 *
229 * This function is used to pass device specific parameters to the userspace
230 * drivers. Examples include: pci device id, pipeline params, tiling params,
231 * etc. (all asics).
232 * Returns 0 on success, -EINVAL on failure.
233 */
234 static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
235 {
236 struct amdgpu_device *adev = dev->dev_private;
237 struct amdgpu_fpriv *fpriv = filp->driver_priv;
238 struct drm_amdgpu_info *info = data;
239 struct amdgpu_mode_info *minfo = &adev->mode_info;
240 void __user *out = (void __user *)(uintptr_t)info->return_pointer;
241 uint32_t size = info->return_size;
242 struct drm_crtc *crtc;
243 uint32_t ui32 = 0;
244 uint64_t ui64 = 0;
245 int i, found;
246 int ui32_size = sizeof(ui32);
247
248 if (!info->return_size || !info->return_pointer)
249 return -EINVAL;
250 if (amdgpu_kms_vram_lost(adev, fpriv))
251 return -ENODEV;
252
253 switch (info->query) {
254 case AMDGPU_INFO_ACCEL_WORKING:
255 ui32 = adev->accel_working;
256 return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
257 case AMDGPU_INFO_CRTC_FROM_ID:
258 for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
259 crtc = (struct drm_crtc *)minfo->crtcs[i];
260 if (crtc && crtc->base.id == info->mode_crtc.id) {
261 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
262 ui32 = amdgpu_crtc->crtc_id;
263 found = 1;
264 break;
265 }
266 }
267 if (!found) {
268 DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
269 return -EINVAL;
270 }
271 return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
272 case AMDGPU_INFO_HW_IP_INFO: {
273 struct drm_amdgpu_info_hw_ip ip = {};
274 enum amd_ip_block_type type;
275 uint32_t ring_mask = 0;
276 uint32_t ib_start_alignment = 0;
277 uint32_t ib_size_alignment = 0;
278
279 if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
280 return -EINVAL;
281
282 switch (info->query_hw_ip.type) {
283 case AMDGPU_HW_IP_GFX:
284 type = AMD_IP_BLOCK_TYPE_GFX;
285 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
286 ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
287 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
288 ib_size_alignment = 8;
289 break;
290 case AMDGPU_HW_IP_COMPUTE:
291 type = AMD_IP_BLOCK_TYPE_GFX;
292 for (i = 0; i < adev->gfx.num_compute_rings; i++)
293 ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
294 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
295 ib_size_alignment = 8;
296 break;
297 case AMDGPU_HW_IP_DMA:
298 type = AMD_IP_BLOCK_TYPE_SDMA;
299 for (i = 0; i < adev->sdma.num_instances; i++)
300 ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
301 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
302 ib_size_alignment = 1;
303 break;
304 case AMDGPU_HW_IP_UVD:
305 type = AMD_IP_BLOCK_TYPE_UVD;
306 ring_mask = adev->uvd.ring.ready ? 1 : 0;
307 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
308 ib_size_alignment = 16;
309 break;
310 case AMDGPU_HW_IP_VCE:
311 type = AMD_IP_BLOCK_TYPE_VCE;
312 for (i = 0; i < adev->vce.num_rings; i++)
313 ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
314 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
315 ib_size_alignment = 1;
316 break;
317 case AMDGPU_HW_IP_UVD_ENC:
318 type = AMD_IP_BLOCK_TYPE_UVD;
319 for (i = 0; i < adev->uvd.num_enc_rings; i++)
320 ring_mask |= ((adev->uvd.ring_enc[i].ready ? 1 : 0) << i);
321 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
322 ib_size_alignment = 1;
323 break;
324 case AMDGPU_HW_IP_VCN_DEC:
325 type = AMD_IP_BLOCK_TYPE_VCN;
326 ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
327 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
328 ib_size_alignment = 16;
329 break;
330 case AMDGPU_HW_IP_VCN_ENC:
331 type = AMD_IP_BLOCK_TYPE_VCN;
332 for (i = 0; i < adev->vcn.num_enc_rings; i++)
333 ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
334 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
335 ib_size_alignment = 1;
336 break;
337 default:
338 return -EINVAL;
339 }
340
341 for (i = 0; i < adev->num_ip_blocks; i++) {
342 if (adev->ip_blocks[i].version->type == type &&
343 adev->ip_blocks[i].status.valid) {
344 ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
345 ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
346 ip.capabilities_flags = 0;
347 ip.available_rings = ring_mask;
348 ip.ib_start_alignment = ib_start_alignment;
349 ip.ib_size_alignment = ib_size_alignment;
350 break;
351 }
352 }
353 return copy_to_user(out, &ip,
354 min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
355 }
356 case AMDGPU_INFO_HW_IP_COUNT: {
357 enum amd_ip_block_type type;
358 uint32_t count = 0;
359
360 switch (info->query_hw_ip.type) {
361 case AMDGPU_HW_IP_GFX:
362 type = AMD_IP_BLOCK_TYPE_GFX;
363 break;
364 case AMDGPU_HW_IP_COMPUTE:
365 type = AMD_IP_BLOCK_TYPE_GFX;
366 break;
367 case AMDGPU_HW_IP_DMA:
368 type = AMD_IP_BLOCK_TYPE_SDMA;
369 break;
370 case AMDGPU_HW_IP_UVD:
371 type = AMD_IP_BLOCK_TYPE_UVD;
372 break;
373 case AMDGPU_HW_IP_VCE:
374 type = AMD_IP_BLOCK_TYPE_VCE;
375 break;
376 case AMDGPU_HW_IP_UVD_ENC:
377 type = AMD_IP_BLOCK_TYPE_UVD;
378 break;
379 case AMDGPU_HW_IP_VCN_DEC:
380 case AMDGPU_HW_IP_VCN_ENC:
381 type = AMD_IP_BLOCK_TYPE_VCN;
382 break;
383 default:
384 return -EINVAL;
385 }
386
387 for (i = 0; i < adev->num_ip_blocks; i++)
388 if (adev->ip_blocks[i].version->type == type &&
389 adev->ip_blocks[i].status.valid &&
390 count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
391 count++;
392
393 return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
394 }
395 case AMDGPU_INFO_TIMESTAMP:
396 ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
397 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
398 case AMDGPU_INFO_FW_VERSION: {
399 struct drm_amdgpu_info_firmware fw_info;
400 int ret;
401
402 /* We only support one instance of each IP block right now. */
403 if (info->query_fw.ip_instance != 0)
404 return -EINVAL;
405
406 ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
407 if (ret)
408 return ret;
409
410 return copy_to_user(out, &fw_info,
411 min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
412 }
413 case AMDGPU_INFO_NUM_BYTES_MOVED:
414 ui64 = atomic64_read(&adev->num_bytes_moved);
415 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
416 case AMDGPU_INFO_NUM_EVICTIONS:
417 ui64 = atomic64_read(&adev->num_evictions);
418 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
419 case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
420 ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
421 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
422 case AMDGPU_INFO_VRAM_USAGE:
423 ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
424 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
425 case AMDGPU_INFO_VIS_VRAM_USAGE:
426 ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
427 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
428 case AMDGPU_INFO_GTT_USAGE:
429 ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
430 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
431 case AMDGPU_INFO_GDS_CONFIG: {
432 struct drm_amdgpu_info_gds gds_info;
433
434 memset(&gds_info, 0, sizeof(gds_info));
435 gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT;
436 gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT;
437 gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT;
438 gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT;
439 gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT;
440 gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT;
441 gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT;
442 return copy_to_user(out, &gds_info,
443 min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
444 }
445 case AMDGPU_INFO_VRAM_GTT: {
446 struct drm_amdgpu_info_vram_gtt vram_gtt;
447
448 vram_gtt.vram_size = adev->mc.real_vram_size;
449 vram_gtt.vram_size -= adev->vram_pin_size;
450 vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
451 vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
452 vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
453 vram_gtt.gtt_size *= PAGE_SIZE;
454 vram_gtt.gtt_size -= adev->gart_pin_size;
455 return copy_to_user(out, &vram_gtt,
456 min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
457 }
458 case AMDGPU_INFO_MEMORY: {
459 struct drm_amdgpu_memory_info mem;
460
461 memset(&mem, 0, sizeof(mem));
462 mem.vram.total_heap_size = adev->mc.real_vram_size;
463 mem.vram.usable_heap_size =
464 adev->mc.real_vram_size - adev->vram_pin_size;
465 mem.vram.heap_usage =
466 amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
467 mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
468
469 mem.cpu_accessible_vram.total_heap_size =
470 adev->mc.visible_vram_size;
471 mem.cpu_accessible_vram.usable_heap_size =
472 adev->mc.visible_vram_size -
473 (adev->vram_pin_size - adev->invisible_pin_size);
474 mem.cpu_accessible_vram.heap_usage =
475 amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
476 mem.cpu_accessible_vram.max_allocation =
477 mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
478
479 mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
480 mem.gtt.total_heap_size *= PAGE_SIZE;
481 mem.gtt.usable_heap_size = mem.gtt.total_heap_size
482 - adev->gart_pin_size;
483 mem.gtt.heap_usage =
484 amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
485 mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
486
487 return copy_to_user(out, &mem,
488 min((size_t)size, sizeof(mem)))
489 ? -EFAULT : 0;
490 }
491 case AMDGPU_INFO_READ_MMR_REG: {
492 unsigned n, alloc_size;
493 uint32_t *regs;
494 unsigned se_num = (info->read_mmr_reg.instance >>
495 AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
496 AMDGPU_INFO_MMR_SE_INDEX_MASK;
497 unsigned sh_num = (info->read_mmr_reg.instance >>
498 AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
499 AMDGPU_INFO_MMR_SH_INDEX_MASK;
500
501 /* set full masks if userspace set all bits
502 * in the bitfields */
503 if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
504 se_num = 0xffffffff;
505 if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
506 sh_num = 0xffffffff;
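/* For example, if userspace left both index bitfields at their all-ones
 * value, se_num and sh_num both end up as 0xffffffff here, which the
 * asic read_register callback treats as "broadcast to all shader engines
 * and arrays" rather than as a specific SE/SH index.
 */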
507
508 if (info->read_mmr_reg.count > 128)
509 return -EINVAL;
510
511 regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
512 if (!regs)
513 return -ENOMEM;
514 alloc_size = info->read_mmr_reg.count * sizeof(*regs);
515
516 for (i = 0; i < info->read_mmr_reg.count; i++)
517 if (amdgpu_asic_read_register(adev, se_num, sh_num,
518 info->read_mmr_reg.dword_offset + i,
519 &regs[i])) {
520 DRM_DEBUG_KMS("unallowed offset %#x\n",
521 info->read_mmr_reg.dword_offset + i);
522 kfree(regs);
523 return -EFAULT;
524 }
525 n = copy_to_user(out, regs, min(size, alloc_size));
526 kfree(regs);
527 return n ? -EFAULT : 0;
528 }
529 case AMDGPU_INFO_DEV_INFO: {
530 struct drm_amdgpu_info_device dev_info = {};
531
532 dev_info.device_id = dev->pdev->device;
533 dev_info.chip_rev = adev->rev_id;
534 dev_info.external_rev = adev->external_rev_id;
535 dev_info.pci_rev = dev->pdev->revision;
536 dev_info.family = adev->family;
537 dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
538 dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
539 /* return all clocks in KHz */
540 dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
541 if (adev->pm.dpm_enabled) {
542 dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
543 dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
544 } else {
545 dev_info.max_engine_clock = adev->clock.default_sclk * 10;
546 dev_info.max_memory_clock = adev->clock.default_mclk * 10;
547 }
548 dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
549 dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
550 adev->gfx.config.max_shader_engines;
551 dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
552 dev_info._pad = 0;
553 dev_info.ids_flags = 0;
554 if (adev->flags & AMD_IS_APU)
555 dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
556 if (amdgpu_sriov_vf(adev))
557 dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
558 dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
559 dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
560 dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
561 dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
562 dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
563 dev_info.cu_active_number = adev->gfx.cu_info.number;
564 dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
565 dev_info.ce_ram_size = adev->gfx.ce_ram_size;
566 memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
567 sizeof(adev->gfx.cu_info.ao_cu_bitmap));
568 memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
569 sizeof(adev->gfx.cu_info.bitmap));
570 dev_info.vram_type = adev->mc.vram_type;
571 dev_info.vram_bit_width = adev->mc.vram_width;
572 dev_info.vce_harvest_config = adev->vce.harvest_config;
573 dev_info.gc_double_offchip_lds_buf =
574 adev->gfx.config.double_offchip_lds_buf;
575
576 if (amdgpu_ngg) {
577 dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
578 dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
579 dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
580 dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
581 dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
582 dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
583 dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
584 dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
585 }
586 dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
587 dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
588 dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
589 dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
590 dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
591 dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
592 dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;
593
594 return copy_to_user(out, &dev_info,
595 min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
596 }
597 case AMDGPU_INFO_VCE_CLOCK_TABLE: {
598 unsigned i;
599 struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
600 struct amd_vce_state *vce_state;
601
602 for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
603 vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
604 if (vce_state) {
605 vce_clk_table.entries[i].sclk = vce_state->sclk;
606 vce_clk_table.entries[i].mclk = vce_state->mclk;
607 vce_clk_table.entries[i].eclk = vce_state->evclk;
608 vce_clk_table.num_valid_entries++;
609 }
610 }
611
612 return copy_to_user(out, &vce_clk_table,
613 min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
614 }
615 case AMDGPU_INFO_VBIOS: {
616 uint32_t bios_size = adev->bios_size;
617
618 switch (info->vbios_info.type) {
619 case AMDGPU_INFO_VBIOS_SIZE:
620 return copy_to_user(out, &bios_size,
621 min((size_t)size, sizeof(bios_size)))
622 ? -EFAULT : 0;
623 case AMDGPU_INFO_VBIOS_IMAGE: {
624 uint8_t *bios;
625 uint32_t bios_offset = info->vbios_info.offset;
626
627 if (bios_offset >= bios_size)
628 return -EINVAL;
629
630 bios = adev->bios + bios_offset;
631 return copy_to_user(out, bios,
632 min((size_t)size, (size_t)(bios_size - bios_offset)))
633 ? -EFAULT : 0;
634 }
635 default:
636 DRM_DEBUG_KMS("Invalid request %d\n",
637 info->vbios_info.type);
638 return -EINVAL;
639 }
640 }
641 case AMDGPU_INFO_NUM_HANDLES: {
642 struct drm_amdgpu_info_num_handles handle;
643
644 switch (info->query_hw_ip.type) {
645 case AMDGPU_HW_IP_UVD:
646 /* Starting with Polaris, we support unlimited UVD handles */
647 if (adev->asic_type < CHIP_POLARIS10) {
648 handle.uvd_max_handles = adev->uvd.max_handles;
649 handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);
650
651 return copy_to_user(out, &handle,
652 min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
653 } else {
654 return -ENODATA;
655 }
656
657 break;
658 default:
659 return -EINVAL;
660 }
661 }
662 case AMDGPU_INFO_SENSOR: {
663 struct pp_gpu_power query = {0};
664 int query_size = sizeof(query);
665
666 if (amdgpu_dpm == 0)
667 return -ENOENT;
668
669 switch (info->sensor_info.type) {
670 case AMDGPU_INFO_SENSOR_GFX_SCLK:
671 /* get sclk in MHz */
672 if (amdgpu_dpm_read_sensor(adev,
673 AMDGPU_PP_SENSOR_GFX_SCLK,
674 (void *)&ui32, &ui32_size)) {
675 return -EINVAL;
676 }
677 ui32 /= 100;
678 break;
679 case AMDGPU_INFO_SENSOR_GFX_MCLK:
680 /* get mclk in MHz */
681 if (amdgpu_dpm_read_sensor(adev,
682 AMDGPU_PP_SENSOR_GFX_MCLK,
683 (void *)&ui32, &ui32_size)) {
684 return -EINVAL;
685 }
686 ui32 /= 100;
687 break;
688 case AMDGPU_INFO_SENSOR_GPU_TEMP:
689 /* get temperature in millidegrees C */
690 if (amdgpu_dpm_read_sensor(adev,
691 AMDGPU_PP_SENSOR_GPU_TEMP,
692 (void *)&ui32, &ui32_size)) {
693 return -EINVAL;
694 }
695 break;
696 case AMDGPU_INFO_SENSOR_GPU_LOAD:
697 /* get GPU load */
698 if (amdgpu_dpm_read_sensor(adev,
699 AMDGPU_PP_SENSOR_GPU_LOAD,
700 (void *)&ui32, &ui32_size)) {
701 return -EINVAL;
702 }
703 break;
704 case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
705 /* get average GPU power */
706 if (amdgpu_dpm_read_sensor(adev,
707 AMDGPU_PP_SENSOR_GPU_POWER,
708 (void *)&query, &query_size)) {
709 return -EINVAL;
710 }
711 ui32 = query.average_gpu_power >> 8;
712 break;
713 case AMDGPU_INFO_SENSOR_VDDNB:
714 /* get VDDNB in millivolts */
715 if (amdgpu_dpm_read_sensor(adev,
716 AMDGPU_PP_SENSOR_VDDNB,
717 (void *)&ui32, &ui32_size)) {
718 return -EINVAL;
719 }
720 break;
721 case AMDGPU_INFO_SENSOR_VDDGFX:
722 /* get VDDGFX in millivolts */
723 if (amdgpu_dpm_read_sensor(adev,
724 AMDGPU_PP_SENSOR_VDDGFX,
725 (void *)&ui32, &ui32_size)) {
726 return -EINVAL;
727 }
728 break;
729 default:
730 DRM_DEBUG_KMS("Invalid request %d\n",
731 info->sensor_info.type);
732 return -EINVAL;
733 }
734 return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
735 }
736 default:
737 DRM_DEBUG_KMS("Invalid request %d\n", info->query);
738 return -EINVAL;
739 }
740 return 0;
741 }
742
743
744 /*
745 * Outdated mess for old drm with Xorg being in charge (void function now).
746 */
747 /**
748 * amdgpu_driver_lastclose_kms - drm callback for last close
749 *
750 * @dev: drm dev pointer
751 *
752 * Switch vga_switcheroo state after last close (all asics).
753 */
754 void amdgpu_driver_lastclose_kms(struct drm_device *dev)
755 {
756 struct amdgpu_device *adev = dev->dev_private;
757
758 amdgpu_fbdev_restore_mode(adev);
759 vga_switcheroo_process_delayed_switch();
760 }
761
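/*
 * adev->vram_lost_counter is bumped when a GPU reset loses VRAM contents;
 * amdgpu_driver_open_kms() snapshots it into the file private, so a mismatch
 * here means buffers created by this client before the reset may be stale.
 */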
762 bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
763 struct amdgpu_fpriv *fpriv)
764 {
765 return fpriv->vram_lost_counter != atomic_read(&adev->vram_lost_counter);
766 }
767
768 /**
769 * amdgpu_driver_open_kms - drm callback for open
770 *
771 * @dev: drm dev pointer
772 * @file_priv: drm file
773 *
774 * On device open, init vm on cayman+ (all asics).
775 * Returns 0 on success, error on failure.
776 */
777 int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
778 {
779 struct amdgpu_device *adev = dev->dev_private;
780 struct amdgpu_fpriv *fpriv;
781 int r;
782
783 file_priv->driver_priv = NULL;
784
785 r = pm_runtime_get_sync(dev->dev);
786 if (r < 0)
787 return r;
788
789 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
790 if (unlikely(!fpriv)) {
791 r = -ENOMEM;
792 goto out_suspend;
793 }
794
795 r = amdgpu_vm_init(adev, &fpriv->vm,
796 AMDGPU_VM_CONTEXT_GFX);
797 if (r) {
798 kfree(fpriv);
799 goto out_suspend;
800 }
801
802 fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
803 if (!fpriv->prt_va) {
804 r = -ENOMEM;
805 amdgpu_vm_fini(adev, &fpriv->vm);
806 kfree(fpriv);
807 goto out_suspend;
808 }
809
810 if (amdgpu_sriov_vf(adev)) {
811 r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
812 if (r)
813 goto out_suspend;
814 }
815
816 mutex_init(&fpriv->bo_list_lock);
817 idr_init(&fpriv->bo_list_handles);
818
819 amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
820
821 fpriv->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
822 file_priv->driver_priv = fpriv;
823
824 out_suspend:
825 pm_runtime_mark_last_busy(dev->dev);
826 pm_runtime_put_autosuspend(dev->dev);
827
828 return r;
829 }
830
831 /**
832 * amdgpu_driver_postclose_kms - drm callback for post close
833 *
834 * @dev: drm dev pointer
835 * @file_priv: drm file
836 *
837 * On device post close, tear down vm on cayman+ (all asics).
838 */
839 void amdgpu_driver_postclose_kms(struct drm_device *dev,
840 struct drm_file *file_priv)
841 {
842 struct amdgpu_device *adev = dev->dev_private;
843 struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
844 struct amdgpu_bo_list *list;
845 int handle;
846
847 if (!fpriv)
848 return;
849
850 pm_runtime_get_sync(dev->dev);
851
852 amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
853
854 if (adev->asic_type != CHIP_RAVEN) {
855 amdgpu_uvd_free_handles(adev, file_priv);
856 amdgpu_vce_free_handles(adev, file_priv);
857 }
858
859 amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
860
861 if (amdgpu_sriov_vf(adev)) {
862 /* TODO: how to handle reserve failure */
863 BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
864 amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
865 fpriv->csa_va = NULL;
866 amdgpu_bo_unreserve(adev->virt.csa_obj);
867 }
868
869 amdgpu_vm_fini(adev, &fpriv->vm);
870
871 idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
872 amdgpu_bo_list_free(list);
873
874 idr_destroy(&fpriv->bo_list_handles);
875 mutex_destroy(&fpriv->bo_list_lock);
876
877 kfree(fpriv);
878 file_priv->driver_priv = NULL;
879
880 pm_runtime_mark_last_busy(dev->dev);
881 pm_runtime_put_autosuspend(dev->dev);
882 }
883
884 /*
885 * VBlank related functions.
886 */
887 /**
888 * amdgpu_get_vblank_counter_kms - get frame count
889 *
890 * @dev: drm dev pointer
891 * @pipe: crtc to get the frame count from
892 *
893 * Gets the frame count on the requested crtc (all asics).
894 * Returns frame count on success, -EINVAL on failure.
895 */
896 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
897 {
898 struct amdgpu_device *adev = dev->dev_private;
899 int vpos, hpos, stat;
900 u32 count;
901
902 if (pipe >= adev->mode_info.num_crtc) {
903 DRM_ERROR("Invalid crtc %u\n", pipe);
904 return -EINVAL;
905 }
906
907 /* The hw increments its frame counter at start of vsync, not at start
908 * of vblank, as is required by DRM core vblank counter handling.
909 * Cook the hw count here to make it appear to the caller as if it
910 * incremented at start of vblank. We measure distance to start of
911 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
912 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
913 * result by 1 to give the proper appearance to caller.
914 */
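/* Worked example (hypothetical numbers): if the raw hw counter reads 100
 * while the scanout position is a few lines past the leading edge of
 * vblank but still before vsync, vpos comes back >= 0 and we report 101,
 * as if the counter had already incremented at vblank start.
 */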
915 if (adev->mode_info.crtcs[pipe]) {
916 /* Repeat readout if needed to provide stable result if
917 * we cross start of vsync during the queries.
918 */
919 do {
920 count = amdgpu_display_vblank_get_counter(adev, pipe);
921 /* Ask amdgpu_get_crtc_scanoutpos to return vpos as
922 * distance to start of vblank, instead of regular
923 * vertical scanout pos.
924 */
925 stat = amdgpu_get_crtc_scanoutpos(
926 dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
927 &vpos, &hpos, NULL, NULL,
928 &adev->mode_info.crtcs[pipe]->base.hwmode);
929 } while (count != amdgpu_display_vblank_get_counter(adev, pipe));
930
931 if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
932 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
933 DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
934 } else {
935 DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
936 pipe, vpos);
937
938 /* Bump counter if we are at >= leading edge of vblank,
939 * but before vsync where vpos would turn negative and
940 * the hw counter really increments.
941 */
942 if (vpos >= 0)
943 count++;
944 }
945 } else {
946 /* Fallback to use value as is. */
947 count = amdgpu_display_vblank_get_counter(adev, pipe);
948 DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
949 }
950
951 return count;
952 }
953
954 /**
955 * amdgpu_enable_vblank_kms - enable vblank interrupt
956 *
957 * @dev: drm dev pointer
958 * @pipe: crtc to enable vblank interrupt for
959 *
960 * Enable the interrupt on the requested crtc (all asics).
961 * Returns 0 on success, -EINVAL on failure.
962 */
963 int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
964 {
965 struct amdgpu_device *adev = dev->dev_private;
966 int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);
967
968 return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
969 }
970
971 /**
972 * amdgpu_disable_vblank_kms - disable vblank interrupt
973 *
974 * @dev: drm dev pointer
975 * @pipe: crtc to disable vblank interrupt for
976 *
977 * Disable the interrupt on the requested crtc (all asics).
978 */
979 void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
980 {
981 struct amdgpu_device *adev = dev->dev_private;
982 int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);
983
984 amdgpu_irq_put(adev, &adev->crtc_irq, idx);
985 }
986
987 const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
988 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
989 DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
990 DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
991 DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
992 /* KMS */
993 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
994 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
995 DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
996 DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
997 DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
998 DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
999 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1000 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1001 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1002 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1003 };
1004 const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
1005
1006 /*
1007 * Debugfs info
1008 */
1009 #if defined(CONFIG_DEBUG_FS)
1010
1011 static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
1012 {
1013 struct drm_info_node *node = (struct drm_info_node *) m->private;
1014 struct drm_device *dev = node->minor->dev;
1015 struct amdgpu_device *adev = dev->dev_private;
1016 struct drm_amdgpu_info_firmware fw_info;
1017 struct drm_amdgpu_query_fw query_fw;
1018 int ret, i;
1019
1020 /* VCE */
1021 query_fw.fw_type = AMDGPU_INFO_FW_VCE;
1022 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1023 if (ret)
1024 return ret;
1025 seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
1026 fw_info.feature, fw_info.ver);
1027
1028 /* UVD */
1029 query_fw.fw_type = AMDGPU_INFO_FW_UVD;
1030 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1031 if (ret)
1032 return ret;
1033 seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
1034 fw_info.feature, fw_info.ver);
1035
1036 /* GMC */
1037 query_fw.fw_type = AMDGPU_INFO_FW_GMC;
1038 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1039 if (ret)
1040 return ret;
1041 seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
1042 fw_info.feature, fw_info.ver);
1043
1044 /* ME */
1045 query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
1046 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1047 if (ret)
1048 return ret;
1049 seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
1050 fw_info.feature, fw_info.ver);
1051
1052 /* PFP */
1053 query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
1054 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1055 if (ret)
1056 return ret;
1057 seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
1058 fw_info.feature, fw_info.ver);
1059
1060 /* CE */
1061 query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
1062 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1063 if (ret)
1064 return ret;
1065 seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
1066 fw_info.feature, fw_info.ver);
1067
1068 /* RLC */
1069 query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
1070 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1071 if (ret)
1072 return ret;
1073 seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
1074 fw_info.feature, fw_info.ver);
1075
1076 /* MEC */
1077 query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
1078 query_fw.index = 0;
1079 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1080 if (ret)
1081 return ret;
1082 seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
1083 fw_info.feature, fw_info.ver);
1084
1085 /* MEC2 */
1086 if (adev->asic_type == CHIP_KAVERI ||
1087 (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
1088 query_fw.index = 1;
1089 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1090 if (ret)
1091 return ret;
1092 seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
1093 fw_info.feature, fw_info.ver);
1094 }
1095
1096 /* PSP SOS */
1097 query_fw.fw_type = AMDGPU_INFO_FW_SOS;
1098 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1099 if (ret)
1100 return ret;
1101 seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
1102 fw_info.feature, fw_info.ver);
1103
1104
1105 /* PSP ASD */
1106 query_fw.fw_type = AMDGPU_INFO_FW_ASD;
1107 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1108 if (ret)
1109 return ret;
1110 seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
1111 fw_info.feature, fw_info.ver);
1112
1113 /* SMC */
1114 query_fw.fw_type = AMDGPU_INFO_FW_SMC;
1115 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1116 if (ret)
1117 return ret;
1118 seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
1119 fw_info.feature, fw_info.ver);
1120
1121 /* SDMA */
1122 query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
1123 for (i = 0; i < adev->sdma.num_instances; i++) {
1124 query_fw.index = i;
1125 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1126 if (ret)
1127 return ret;
1128 seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
1129 i, fw_info.feature, fw_info.ver);
1130 }
1131
1132 return 0;
1133 }
1134
1135 static const struct drm_info_list amdgpu_firmware_info_list[] = {
1136 {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
1137 };
1138 #endif
1139
1140 int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
1141 {
1142 #if defined(CONFIG_DEBUG_FS)
1143 return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
1144 ARRAY_SIZE(amdgpu_firmware_info_list));
1145 #else
1146 return 0;
1147 #endif
1148 }
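/*
 * A sketch of how this surfaces to users (path assumed from the usual DRM
 * debugfs layout): once registered, the entry above is readable as
 * /sys/kernel/debug/dri/<minor>/amdgpu_firmware_info, and each read runs
 * amdgpu_debugfs_firmware_info() to print one line per firmware block.
 */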
1149