• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/pci.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/slab.h>
32 #include <linux/uaccess.h>
33 #include <linux/vga_switcheroo.h>
34 
35 #include <drm/drm_agpsupport.h>
36 #include <drm/drm_fb_helper.h>
37 #include <drm/drm_file.h>
38 #include <drm/drm_ioctl.h>
39 #include <drm/radeon_drm.h>
40 
41 #include "radeon.h"
42 #include "radeon_asic.h"
43 
#if defined(CONFIG_VGA_SWITCHEROO)
/* Real implementation lives in the ATPX handler (built only with vga_switcheroo). */
bool radeon_has_atpx(void);
#else
/* Without vga_switcheroo there is no ATPX support: report it as absent. */
static inline bool radeon_has_atpx(void)
{
	return false;
}
#endif
49 
50 /**
51  * radeon_driver_unload_kms - Main unload function for KMS.
52  *
53  * @dev: drm dev pointer
54  *
55  * This is the main unload function for KMS (all asics).
56  * It calls radeon_modeset_fini() to tear down the
57  * displays, and radeon_device_fini() to tear down
58  * the rest of the device (CP, writeback, etc.).
59  * Returns 0 on success.
60  */
radeon_driver_unload_kms(struct drm_device * dev)61 void radeon_driver_unload_kms(struct drm_device *dev)
62 {
63 	struct radeon_device *rdev = dev->dev_private;
64 
65 	if (rdev == NULL)
66 		return;
67 
68 	if (rdev->rmmio == NULL)
69 		goto done_free;
70 
71 	if (radeon_is_px(dev)) {
72 		pm_runtime_get_sync(dev->dev);
73 		pm_runtime_forbid(dev->dev);
74 	}
75 
76 	radeon_acpi_fini(rdev);
77 
78 	radeon_modeset_fini(rdev);
79 	radeon_device_fini(rdev);
80 
81 	if (dev->agp)
82 		arch_phys_wc_del(dev->agp->agp_mtrr);
83 	kfree(dev->agp);
84 	dev->agp = NULL;
85 
86 done_free:
87 	kfree(rdev);
88 	dev->dev_private = NULL;
89 }
90 
91 /**
92  * radeon_driver_load_kms - Main load function for KMS.
93  *
94  * @dev: drm dev pointer
95  * @flags: device flags
96  *
97  * This is the main load function for KMS (all asics).
98  * It calls radeon_device_init() to set up the non-display
99  * parts of the chip (asic init, CP, writeback, etc.), and
100  * radeon_modeset_init() to set up the display parts
101  * (crtcs, encoders, hotplug detect, etc.).
102  * Returns 0 on success, error on failure.
103  */
radeon_driver_load_kms(struct drm_device * dev,unsigned long flags)104 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
105 {
106 	struct radeon_device *rdev;
107 	int r, acpi_status;
108 
109 	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
110 	if (rdev == NULL) {
111 		return -ENOMEM;
112 	}
113 	dev->dev_private = (void *)rdev;
114 
115 	/* update BUS flag */
116 	if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) {
117 		flags |= RADEON_IS_AGP;
118 	} else if (pci_is_pcie(dev->pdev)) {
119 		flags |= RADEON_IS_PCIE;
120 	} else {
121 		flags |= RADEON_IS_PCI;
122 	}
123 
124 	if ((radeon_runtime_pm != 0) &&
125 	    radeon_has_atpx() &&
126 	    ((flags & RADEON_IS_IGP) == 0) &&
127 	    !pci_is_thunderbolt_attached(dev->pdev))
128 		flags |= RADEON_IS_PX;
129 
130 	/* radeon_device_init should report only fatal error
131 	 * like memory allocation failure or iomapping failure,
132 	 * or memory manager initialization failure, it must
133 	 * properly initialize the GPU MC controller and permit
134 	 * VRAM allocation
135 	 */
136 	r = radeon_device_init(rdev, dev, dev->pdev, flags);
137 	if (r) {
138 		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
139 		goto out;
140 	}
141 
142 	/* Again modeset_init should fail only on fatal error
143 	 * otherwise it should provide enough functionalities
144 	 * for shadowfb to run
145 	 */
146 	r = radeon_modeset_init(rdev);
147 	if (r)
148 		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
149 
150 	/* Call ACPI methods: require modeset init
151 	 * but failure is not fatal
152 	 */
153 	if (!r) {
154 		acpi_status = radeon_acpi_init(rdev);
155 		if (acpi_status)
156 		dev_dbg(&dev->pdev->dev,
157 				"Error during ACPI methods call\n");
158 	}
159 
160 	if (radeon_is_px(dev)) {
161 		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
162 		pm_runtime_use_autosuspend(dev->dev);
163 		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
164 		pm_runtime_set_active(dev->dev);
165 		pm_runtime_allow(dev->dev);
166 		pm_runtime_mark_last_busy(dev->dev);
167 		pm_runtime_put_autosuspend(dev->dev);
168 	}
169 
170 out:
171 	if (r)
172 		radeon_driver_unload_kms(dev);
173 
174 
175 	return r;
176 }
177 
178 /**
179  * radeon_set_filp_rights - Set filp right.
180  *
181  * @dev: drm dev pointer
182  * @owner: drm file
183  * @applier: drm file
184  * @value: value
185  *
186  * Sets the filp rights for the device (all asics).
187  */
radeon_set_filp_rights(struct drm_device * dev,struct drm_file ** owner,struct drm_file * applier,uint32_t * value)188 static void radeon_set_filp_rights(struct drm_device *dev,
189 				   struct drm_file **owner,
190 				   struct drm_file *applier,
191 				   uint32_t *value)
192 {
193 	struct radeon_device *rdev = dev->dev_private;
194 
195 	mutex_lock(&rdev->gem.mutex);
196 	if (*value == 1) {
197 		/* wants rights */
198 		if (!*owner)
199 			*owner = applier;
200 	} else if (*value == 0) {
201 		/* revokes rights */
202 		if (*owner == applier)
203 			*owner = NULL;
204 	}
205 	*value = *owner == applier ? 1 : 0;
206 	mutex_unlock(&rdev->gem.mutex);
207 }
208 
209 /*
210  * Userspace get information ioctl
211  */
212 /**
213  * radeon_info_ioctl - answer a device specific request.
214  *
215  * @rdev: radeon device pointer
216  * @data: request object
217  * @filp: drm filp
218  *
219  * This function is used to pass device specific parameters to the userspace
220  * drivers.  Examples include: pci device id, pipeline parms, tiling params,
221  * etc. (all asics).
222  * Returns 0 on success, -EINVAL on failure.
223  */
radeon_info_ioctl(struct drm_device * dev,void * data,struct drm_file * filp)224 static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
225 {
226 	struct radeon_device *rdev = dev->dev_private;
227 	struct drm_radeon_info *info = data;
228 	struct radeon_mode_info *minfo = &rdev->mode_info;
229 	uint32_t *value, value_tmp, *value_ptr, value_size;
230 	uint64_t value64;
231 	struct drm_crtc *crtc;
232 	int i, found;
233 
234 	value_ptr = (uint32_t *)((unsigned long)info->value);
235 	value = &value_tmp;
236 	value_size = sizeof(uint32_t);
237 
238 	switch (info->request) {
239 	case RADEON_INFO_DEVICE_ID:
240 		*value = dev->pdev->device;
241 		break;
242 	case RADEON_INFO_NUM_GB_PIPES:
243 		*value = rdev->num_gb_pipes;
244 		break;
245 	case RADEON_INFO_NUM_Z_PIPES:
246 		*value = rdev->num_z_pipes;
247 		break;
248 	case RADEON_INFO_ACCEL_WORKING:
249 		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
250 		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
251 			*value = false;
252 		else
253 			*value = rdev->accel_working;
254 		break;
255 	case RADEON_INFO_CRTC_FROM_ID:
256 		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
257 			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
258 			return -EFAULT;
259 		}
260 		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
261 			crtc = (struct drm_crtc *)minfo->crtcs[i];
262 			if (crtc && crtc->base.id == *value) {
263 				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
264 				*value = radeon_crtc->crtc_id;
265 				found = 1;
266 				break;
267 			}
268 		}
269 		if (!found) {
270 			DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
271 			return -EINVAL;
272 		}
273 		break;
274 	case RADEON_INFO_ACCEL_WORKING2:
275 		if (rdev->family == CHIP_HAWAII) {
276 			if (rdev->accel_working) {
277 				if (rdev->new_fw)
278 					*value = 3;
279 				else
280 					*value = 2;
281 			} else {
282 				*value = 0;
283 			}
284 		} else {
285 			*value = rdev->accel_working;
286 		}
287 		break;
288 	case RADEON_INFO_TILING_CONFIG:
289 		if (rdev->family >= CHIP_BONAIRE)
290 			*value = rdev->config.cik.tile_config;
291 		else if (rdev->family >= CHIP_TAHITI)
292 			*value = rdev->config.si.tile_config;
293 		else if (rdev->family >= CHIP_CAYMAN)
294 			*value = rdev->config.cayman.tile_config;
295 		else if (rdev->family >= CHIP_CEDAR)
296 			*value = rdev->config.evergreen.tile_config;
297 		else if (rdev->family >= CHIP_RV770)
298 			*value = rdev->config.rv770.tile_config;
299 		else if (rdev->family >= CHIP_R600)
300 			*value = rdev->config.r600.tile_config;
301 		else {
302 			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
303 			return -EINVAL;
304 		}
305 		break;
306 	case RADEON_INFO_WANT_HYPERZ:
307 		/* The "value" here is both an input and output parameter.
308 		 * If the input value is 1, filp requests hyper-z access.
309 		 * If the input value is 0, filp revokes its hyper-z access.
310 		 *
311 		 * When returning, the value is 1 if filp owns hyper-z access,
312 		 * 0 otherwise. */
313 		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
314 			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
315 			return -EFAULT;
316 		}
317 		if (*value >= 2) {
318 			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
319 			return -EINVAL;
320 		}
321 		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
322 		break;
323 	case RADEON_INFO_WANT_CMASK:
324 		/* The same logic as Hyper-Z. */
325 		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
326 			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
327 			return -EFAULT;
328 		}
329 		if (*value >= 2) {
330 			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
331 			return -EINVAL;
332 		}
333 		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
334 		break;
335 	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
336 		/* return clock value in KHz */
337 		if (rdev->asic->get_xclk)
338 			*value = radeon_get_xclk(rdev) * 10;
339 		else
340 			*value = rdev->clock.spll.reference_freq * 10;
341 		break;
342 	case RADEON_INFO_NUM_BACKENDS:
343 		if (rdev->family >= CHIP_BONAIRE)
344 			*value = rdev->config.cik.max_backends_per_se *
345 				rdev->config.cik.max_shader_engines;
346 		else if (rdev->family >= CHIP_TAHITI)
347 			*value = rdev->config.si.max_backends_per_se *
348 				rdev->config.si.max_shader_engines;
349 		else if (rdev->family >= CHIP_CAYMAN)
350 			*value = rdev->config.cayman.max_backends_per_se *
351 				rdev->config.cayman.max_shader_engines;
352 		else if (rdev->family >= CHIP_CEDAR)
353 			*value = rdev->config.evergreen.max_backends;
354 		else if (rdev->family >= CHIP_RV770)
355 			*value = rdev->config.rv770.max_backends;
356 		else if (rdev->family >= CHIP_R600)
357 			*value = rdev->config.r600.max_backends;
358 		else {
359 			return -EINVAL;
360 		}
361 		break;
362 	case RADEON_INFO_NUM_TILE_PIPES:
363 		if (rdev->family >= CHIP_BONAIRE)
364 			*value = rdev->config.cik.max_tile_pipes;
365 		else if (rdev->family >= CHIP_TAHITI)
366 			*value = rdev->config.si.max_tile_pipes;
367 		else if (rdev->family >= CHIP_CAYMAN)
368 			*value = rdev->config.cayman.max_tile_pipes;
369 		else if (rdev->family >= CHIP_CEDAR)
370 			*value = rdev->config.evergreen.max_tile_pipes;
371 		else if (rdev->family >= CHIP_RV770)
372 			*value = rdev->config.rv770.max_tile_pipes;
373 		else if (rdev->family >= CHIP_R600)
374 			*value = rdev->config.r600.max_tile_pipes;
375 		else {
376 			return -EINVAL;
377 		}
378 		break;
379 	case RADEON_INFO_FUSION_GART_WORKING:
380 		*value = 1;
381 		break;
382 	case RADEON_INFO_BACKEND_MAP:
383 		if (rdev->family >= CHIP_BONAIRE)
384 			*value = rdev->config.cik.backend_map;
385 		else if (rdev->family >= CHIP_TAHITI)
386 			*value = rdev->config.si.backend_map;
387 		else if (rdev->family >= CHIP_CAYMAN)
388 			*value = rdev->config.cayman.backend_map;
389 		else if (rdev->family >= CHIP_CEDAR)
390 			*value = rdev->config.evergreen.backend_map;
391 		else if (rdev->family >= CHIP_RV770)
392 			*value = rdev->config.rv770.backend_map;
393 		else if (rdev->family >= CHIP_R600)
394 			*value = rdev->config.r600.backend_map;
395 		else {
396 			return -EINVAL;
397 		}
398 		break;
399 	case RADEON_INFO_VA_START:
400 		/* this is where we report if vm is supported or not */
401 		if (rdev->family < CHIP_CAYMAN)
402 			return -EINVAL;
403 		*value = RADEON_VA_RESERVED_SIZE;
404 		break;
405 	case RADEON_INFO_IB_VM_MAX_SIZE:
406 		/* this is where we report if vm is supported or not */
407 		if (rdev->family < CHIP_CAYMAN)
408 			return -EINVAL;
409 		*value = RADEON_IB_VM_MAX_SIZE;
410 		break;
411 	case RADEON_INFO_MAX_PIPES:
412 		if (rdev->family >= CHIP_BONAIRE)
413 			*value = rdev->config.cik.max_cu_per_sh;
414 		else if (rdev->family >= CHIP_TAHITI)
415 			*value = rdev->config.si.max_cu_per_sh;
416 		else if (rdev->family >= CHIP_CAYMAN)
417 			*value = rdev->config.cayman.max_pipes_per_simd;
418 		else if (rdev->family >= CHIP_CEDAR)
419 			*value = rdev->config.evergreen.max_pipes;
420 		else if (rdev->family >= CHIP_RV770)
421 			*value = rdev->config.rv770.max_pipes;
422 		else if (rdev->family >= CHIP_R600)
423 			*value = rdev->config.r600.max_pipes;
424 		else {
425 			return -EINVAL;
426 		}
427 		break;
428 	case RADEON_INFO_TIMESTAMP:
429 		if (rdev->family < CHIP_R600) {
430 			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
431 			return -EINVAL;
432 		}
433 		value = (uint32_t*)&value64;
434 		value_size = sizeof(uint64_t);
435 		value64 = radeon_get_gpu_clock_counter(rdev);
436 		break;
437 	case RADEON_INFO_MAX_SE:
438 		if (rdev->family >= CHIP_BONAIRE)
439 			*value = rdev->config.cik.max_shader_engines;
440 		else if (rdev->family >= CHIP_TAHITI)
441 			*value = rdev->config.si.max_shader_engines;
442 		else if (rdev->family >= CHIP_CAYMAN)
443 			*value = rdev->config.cayman.max_shader_engines;
444 		else if (rdev->family >= CHIP_CEDAR)
445 			*value = rdev->config.evergreen.num_ses;
446 		else
447 			*value = 1;
448 		break;
449 	case RADEON_INFO_MAX_SH_PER_SE:
450 		if (rdev->family >= CHIP_BONAIRE)
451 			*value = rdev->config.cik.max_sh_per_se;
452 		else if (rdev->family >= CHIP_TAHITI)
453 			*value = rdev->config.si.max_sh_per_se;
454 		else
455 			return -EINVAL;
456 		break;
457 	case RADEON_INFO_FASTFB_WORKING:
458 		*value = rdev->fastfb_working;
459 		break;
460 	case RADEON_INFO_RING_WORKING:
461 		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
462 			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
463 			return -EFAULT;
464 		}
465 		switch (*value) {
466 		case RADEON_CS_RING_GFX:
467 		case RADEON_CS_RING_COMPUTE:
468 			*value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
469 			break;
470 		case RADEON_CS_RING_DMA:
471 			*value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
472 			*value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
473 			break;
474 		case RADEON_CS_RING_UVD:
475 			*value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
476 			break;
477 		case RADEON_CS_RING_VCE:
478 			*value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
479 			break;
480 		default:
481 			return -EINVAL;
482 		}
483 		break;
484 	case RADEON_INFO_SI_TILE_MODE_ARRAY:
485 		if (rdev->family >= CHIP_BONAIRE) {
486 			value = rdev->config.cik.tile_mode_array;
487 			value_size = sizeof(uint32_t)*32;
488 		} else if (rdev->family >= CHIP_TAHITI) {
489 			value = rdev->config.si.tile_mode_array;
490 			value_size = sizeof(uint32_t)*32;
491 		} else {
492 			DRM_DEBUG_KMS("tile mode array is si+ only!\n");
493 			return -EINVAL;
494 		}
495 		break;
496 	case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
497 		if (rdev->family >= CHIP_BONAIRE) {
498 			value = rdev->config.cik.macrotile_mode_array;
499 			value_size = sizeof(uint32_t)*16;
500 		} else {
501 			DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
502 			return -EINVAL;
503 		}
504 		break;
505 	case RADEON_INFO_SI_CP_DMA_COMPUTE:
506 		*value = 1;
507 		break;
508 	case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
509 		if (rdev->family >= CHIP_BONAIRE) {
510 			*value = rdev->config.cik.backend_enable_mask;
511 		} else if (rdev->family >= CHIP_TAHITI) {
512 			*value = rdev->config.si.backend_enable_mask;
513 		} else {
514 			DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
515 			return -EINVAL;
516 		}
517 		break;
518 	case RADEON_INFO_MAX_SCLK:
519 		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
520 		    rdev->pm.dpm_enabled)
521 			*value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
522 		else
523 			*value = rdev->pm.default_sclk * 10;
524 		break;
525 	case RADEON_INFO_VCE_FW_VERSION:
526 		*value = rdev->vce.fw_version;
527 		break;
528 	case RADEON_INFO_VCE_FB_VERSION:
529 		*value = rdev->vce.fb_version;
530 		break;
531 	case RADEON_INFO_NUM_BYTES_MOVED:
532 		value = (uint32_t*)&value64;
533 		value_size = sizeof(uint64_t);
534 		value64 = atomic64_read(&rdev->num_bytes_moved);
535 		break;
536 	case RADEON_INFO_VRAM_USAGE:
537 		value = (uint32_t*)&value64;
538 		value_size = sizeof(uint64_t);
539 		value64 = atomic64_read(&rdev->vram_usage);
540 		break;
541 	case RADEON_INFO_GTT_USAGE:
542 		value = (uint32_t*)&value64;
543 		value_size = sizeof(uint64_t);
544 		value64 = atomic64_read(&rdev->gtt_usage);
545 		break;
546 	case RADEON_INFO_ACTIVE_CU_COUNT:
547 		if (rdev->family >= CHIP_BONAIRE)
548 			*value = rdev->config.cik.active_cus;
549 		else if (rdev->family >= CHIP_TAHITI)
550 			*value = rdev->config.si.active_cus;
551 		else if (rdev->family >= CHIP_CAYMAN)
552 			*value = rdev->config.cayman.active_simds;
553 		else if (rdev->family >= CHIP_CEDAR)
554 			*value = rdev->config.evergreen.active_simds;
555 		else if (rdev->family >= CHIP_RV770)
556 			*value = rdev->config.rv770.active_simds;
557 		else if (rdev->family >= CHIP_R600)
558 			*value = rdev->config.r600.active_simds;
559 		else
560 			*value = 1;
561 		break;
562 	case RADEON_INFO_CURRENT_GPU_TEMP:
563 		/* get temperature in millidegrees C */
564 		if (rdev->asic->pm.get_temperature)
565 			*value = radeon_get_temperature(rdev);
566 		else
567 			*value = 0;
568 		break;
569 	case RADEON_INFO_CURRENT_GPU_SCLK:
570 		/* get sclk in Mhz */
571 		if (rdev->pm.dpm_enabled)
572 			*value = radeon_dpm_get_current_sclk(rdev) / 100;
573 		else
574 			*value = rdev->pm.current_sclk / 100;
575 		break;
576 	case RADEON_INFO_CURRENT_GPU_MCLK:
577 		/* get mclk in Mhz */
578 		if (rdev->pm.dpm_enabled)
579 			*value = radeon_dpm_get_current_mclk(rdev) / 100;
580 		else
581 			*value = rdev->pm.current_mclk / 100;
582 		break;
583 	case RADEON_INFO_READ_REG:
584 		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
585 			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
586 			return -EFAULT;
587 		}
588 		if (radeon_get_allowed_info_register(rdev, *value, value))
589 			return -EINVAL;
590 		break;
591 	case RADEON_INFO_VA_UNMAP_WORKING:
592 		*value = true;
593 		break;
594 	case RADEON_INFO_GPU_RESET_COUNTER:
595 		*value = atomic_read(&rdev->gpu_reset_counter);
596 		break;
597 	default:
598 		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
599 		return -EINVAL;
600 	}
601 	if (copy_to_user(value_ptr, (char*)value, value_size)) {
602 		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
603 		return -EFAULT;
604 	}
605 	return 0;
606 }
607 
608 
/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * radeon_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Restores the fbdev console state and lets vga_switcheroo carry out
 * any delayed GPU switch once the last client has closed (all asics).
 */
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}
624 
625 /**
626  * radeon_driver_open_kms - drm callback for open
627  *
628  * @dev: drm dev pointer
629  * @file_priv: drm file
630  *
631  * On device open, init vm on cayman+ (all asics).
632  * Returns 0 on success, error on failure.
633  */
radeon_driver_open_kms(struct drm_device * dev,struct drm_file * file_priv)634 int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
635 {
636 	struct radeon_device *rdev = dev->dev_private;
637 	struct radeon_fpriv *fpriv;
638 	struct radeon_vm *vm;
639 	int r;
640 
641 	file_priv->driver_priv = NULL;
642 
643 	r = pm_runtime_get_sync(dev->dev);
644 	if (r < 0) {
645 		pm_runtime_put_autosuspend(dev->dev);
646 		return r;
647 	}
648 
649 	/* new gpu have virtual address space support */
650 	if (rdev->family >= CHIP_CAYMAN) {
651 
652 		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
653 		if (unlikely(!fpriv)) {
654 			r = -ENOMEM;
655 			goto err_suspend;
656 		}
657 
658 		if (rdev->accel_working) {
659 			vm = &fpriv->vm;
660 			r = radeon_vm_init(rdev, vm);
661 			if (r)
662 				goto err_fpriv;
663 
664 			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
665 			if (r)
666 				goto err_vm_fini;
667 
668 			/* map the ib pool buffer read only into
669 			 * virtual address space */
670 			vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
671 							rdev->ring_tmp_bo.bo);
672 			if (!vm->ib_bo_va) {
673 				r = -ENOMEM;
674 				goto err_vm_fini;
675 			}
676 
677 			r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
678 						  RADEON_VA_IB_OFFSET,
679 						  RADEON_VM_PAGE_READABLE |
680 						  RADEON_VM_PAGE_SNOOPED);
681 			if (r)
682 				goto err_vm_fini;
683 		}
684 		file_priv->driver_priv = fpriv;
685 	}
686 
687 	pm_runtime_mark_last_busy(dev->dev);
688 	pm_runtime_put_autosuspend(dev->dev);
689 	return 0;
690 
691 err_vm_fini:
692 	radeon_vm_fini(rdev, vm);
693 err_fpriv:
694 	kfree(fpriv);
695 
696 err_suspend:
697 	pm_runtime_mark_last_busy(dev->dev);
698 	pm_runtime_put_autosuspend(dev->dev);
699 	return r;
700 }
701 
702 /**
703  * radeon_driver_postclose_kms - drm callback for post close
704  *
705  * @dev: drm dev pointer
706  * @file_priv: drm file
707  *
708  * On device close, tear down hyperz and cmask filps on r1xx-r5xx
709  * (all asics).  And tear down vm on cayman+ (all asics).
710  */
radeon_driver_postclose_kms(struct drm_device * dev,struct drm_file * file_priv)711 void radeon_driver_postclose_kms(struct drm_device *dev,
712 				 struct drm_file *file_priv)
713 {
714 	struct radeon_device *rdev = dev->dev_private;
715 
716 	pm_runtime_get_sync(dev->dev);
717 
718 	mutex_lock(&rdev->gem.mutex);
719 	if (rdev->hyperz_filp == file_priv)
720 		rdev->hyperz_filp = NULL;
721 	if (rdev->cmask_filp == file_priv)
722 		rdev->cmask_filp = NULL;
723 	mutex_unlock(&rdev->gem.mutex);
724 
725 	radeon_uvd_free_handles(rdev, file_priv);
726 	radeon_vce_free_handles(rdev, file_priv);
727 
728 	/* new gpu have virtual address space support */
729 	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
730 		struct radeon_fpriv *fpriv = file_priv->driver_priv;
731 		struct radeon_vm *vm = &fpriv->vm;
732 		int r;
733 
734 		if (rdev->accel_working) {
735 			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
736 			if (!r) {
737 				if (vm->ib_bo_va)
738 					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
739 				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
740 			}
741 			radeon_vm_fini(rdev, vm);
742 		}
743 
744 		kfree(fpriv);
745 		file_priv->driver_priv = NULL;
746 	}
747 	pm_runtime_mark_last_busy(dev->dev);
748 	pm_runtime_put_autosuspend(dev->dev);
749 }
750 
751 /*
752  * VBlank related functions.
753  */
754 /**
755  * radeon_get_vblank_counter_kms - get frame count
756  *
757  * @crtc: crtc to get the frame count from
758  *
759  * Gets the frame count on the requested crtc (all asics).
760  * Returns frame count on success, -EINVAL on failure.
761  */
radeon_get_vblank_counter_kms(struct drm_crtc * crtc)762 u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc)
763 {
764 	struct drm_device *dev = crtc->dev;
765 	unsigned int pipe = crtc->index;
766 	int vpos, hpos, stat;
767 	u32 count;
768 	struct radeon_device *rdev = dev->dev_private;
769 
770 	if (pipe >= rdev->num_crtc) {
771 		DRM_ERROR("Invalid crtc %u\n", pipe);
772 		return -EINVAL;
773 	}
774 
775 	/* The hw increments its frame counter at start of vsync, not at start
776 	 * of vblank, as is required by DRM core vblank counter handling.
777 	 * Cook the hw count here to make it appear to the caller as if it
778 	 * incremented at start of vblank. We measure distance to start of
779 	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
780 	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
781 	 * result by 1 to give the proper appearance to caller.
782 	 */
783 	if (rdev->mode_info.crtcs[pipe]) {
784 		/* Repeat readout if needed to provide stable result if
785 		 * we cross start of vsync during the queries.
786 		 */
787 		do {
788 			count = radeon_get_vblank_counter(rdev, pipe);
789 			/* Ask radeon_get_crtc_scanoutpos to return vpos as
790 			 * distance to start of vblank, instead of regular
791 			 * vertical scanout pos.
792 			 */
793 			stat = radeon_get_crtc_scanoutpos(
794 				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
795 				&vpos, &hpos, NULL, NULL,
796 				&rdev->mode_info.crtcs[pipe]->base.hwmode);
797 		} while (count != radeon_get_vblank_counter(rdev, pipe));
798 
799 		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
800 		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
801 			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
802 		}
803 		else {
804 			DRM_DEBUG_VBL("crtc %u: dist from vblank start %d\n",
805 				      pipe, vpos);
806 
807 			/* Bump counter if we are at >= leading edge of vblank,
808 			 * but before vsync where vpos would turn negative and
809 			 * the hw counter really increments.
810 			 */
811 			if (vpos >= 0)
812 				count++;
813 		}
814 	}
815 	else {
816 	    /* Fallback to use value as is. */
817 	    count = radeon_get_vblank_counter(rdev, pipe);
818 	    DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
819 	}
820 
821 	return count;
822 }
823 
824 /**
825  * radeon_enable_vblank_kms - enable vblank interrupt
826  *
827  * @crtc: crtc to enable vblank interrupt for
828  *
829  * Enable the interrupt on the requested crtc (all asics).
830  * Returns 0 on success, -EINVAL on failure.
831  */
radeon_enable_vblank_kms(struct drm_crtc * crtc)832 int radeon_enable_vblank_kms(struct drm_crtc *crtc)
833 {
834 	struct drm_device *dev = crtc->dev;
835 	unsigned int pipe = crtc->index;
836 	struct radeon_device *rdev = dev->dev_private;
837 	unsigned long irqflags;
838 	int r;
839 
840 	if (pipe >= rdev->num_crtc) {
841 		DRM_ERROR("Invalid crtc %d\n", pipe);
842 		return -EINVAL;
843 	}
844 
845 	spin_lock_irqsave(&rdev->irq.lock, irqflags);
846 	rdev->irq.crtc_vblank_int[pipe] = true;
847 	r = radeon_irq_set(rdev);
848 	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
849 	return r;
850 }
851 
852 /**
853  * radeon_disable_vblank_kms - disable vblank interrupt
854  *
855  * @crtc: crtc to disable vblank interrupt for
856  *
857  * Disable the interrupt on the requested crtc (all asics).
858  */
radeon_disable_vblank_kms(struct drm_crtc * crtc)859 void radeon_disable_vblank_kms(struct drm_crtc *crtc)
860 {
861 	struct drm_device *dev = crtc->dev;
862 	unsigned int pipe = crtc->index;
863 	struct radeon_device *rdev = dev->dev_private;
864 	unsigned long irqflags;
865 
866 	if (pipe >= rdev->num_crtc) {
867 		DRM_ERROR("Invalid crtc %d\n", pipe);
868 		return;
869 	}
870 
871 	spin_lock_irqsave(&rdev->irq.lock, irqflags);
872 	rdev->irq.crtc_vblank_int[pipe] = false;
873 	radeon_irq_set(rdev);
874 	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
875 }
876 
877 const struct drm_ioctl_desc radeon_ioctls_kms[] = {
878 	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
879 	DRM_IOCTL_DEF_DRV(RADEON_CP_START, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
880 	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
881 	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
882 	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, drm_invalid_op, DRM_AUTH),
883 	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, drm_invalid_op, DRM_AUTH),
884 	DRM_IOCTL_DEF_DRV(RADEON_RESET, drm_invalid_op, DRM_AUTH),
885 	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, drm_invalid_op, DRM_AUTH),
886 	DRM_IOCTL_DEF_DRV(RADEON_SWAP, drm_invalid_op, DRM_AUTH),
887 	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, drm_invalid_op, DRM_AUTH),
888 	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, drm_invalid_op, DRM_AUTH),
889 	DRM_IOCTL_DEF_DRV(RADEON_INDICES, drm_invalid_op, DRM_AUTH),
890 	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, drm_invalid_op, DRM_AUTH),
891 	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, drm_invalid_op, DRM_AUTH),
892 	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
893 	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, drm_invalid_op, DRM_AUTH),
894 	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, drm_invalid_op, DRM_AUTH),
895 	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, drm_invalid_op, DRM_AUTH),
896 	DRM_IOCTL_DEF_DRV(RADEON_FLIP, drm_invalid_op, DRM_AUTH),
897 	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, drm_invalid_op, DRM_AUTH),
898 	DRM_IOCTL_DEF_DRV(RADEON_FREE, drm_invalid_op, DRM_AUTH),
899 	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
900 	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, drm_invalid_op, DRM_AUTH),
901 	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, drm_invalid_op, DRM_AUTH),
902 	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, drm_invalid_op, DRM_AUTH),
903 	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH),
904 	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH),
905 	/* KMS */
906 	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
907 	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
908 	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
909 	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
910 	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
911 	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
912 	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
913 	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
914 	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
915 	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
916 	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
917 	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
918 	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
919 	DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
920 	DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
921 };
922 int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);
923