1 /*
2  * Copyright © 2013 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "i915_drv.h"
25 #include "intel_drv.h"
26 #include "i915_vgpu.h"
27 
28 #include <asm/iosf_mbi.h>
29 #include <linux/pm_runtime.h>
30 
31 #define FORCEWAKE_ACK_TIMEOUT_MS 50
32 #define GT_FIFO_TIMEOUT_MS	 10
33 
34 #define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
35 
36 static const char * const forcewake_domain_names[] = {
37 	"render",
38 	"blitter",
39 	"media",
40 };
41 
42 const char *
43 intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
44 {
45 	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
46 
47 	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
48 		return forcewake_domain_names[id];
49 
50 	WARN_ON(id);
51 
52 	return "unknown";
53 }
54 
55 static inline void
56 fw_domain_reset(struct drm_i915_private *i915,
57 		const struct intel_uncore_forcewake_domain *d)
58 {
59 	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
60 }
61 
62 static inline void
63 fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
64 {
65 	d->wake_count++;
66 	hrtimer_start_range_ns(&d->timer,
67 			       NSEC_PER_MSEC,
68 			       NSEC_PER_MSEC,
69 			       HRTIMER_MODE_REL);
70 }
71 
72 static inline void
73 fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
74 			 const struct intel_uncore_forcewake_domain *d)
75 {
76 	if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
77 			     FORCEWAKE_KERNEL) == 0,
78 			    FORCEWAKE_ACK_TIMEOUT_MS))
79 		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
80 			  intel_uncore_forcewake_domain_to_str(d->id));
81 }
82 
83 static inline void
84 fw_domain_get(struct drm_i915_private *i915,
85 	      const struct intel_uncore_forcewake_domain *d)
86 {
87 	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
88 }
89 
90 static inline void
91 fw_domain_wait_ack(const struct drm_i915_private *i915,
92 		   const struct intel_uncore_forcewake_domain *d)
93 {
94 	if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
95 			     FORCEWAKE_KERNEL),
96 			    FORCEWAKE_ACK_TIMEOUT_MS))
97 		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
98 			  intel_uncore_forcewake_domain_to_str(d->id));
99 }
100 
101 static inline void
102 fw_domain_put(const struct drm_i915_private *i915,
103 	      const struct intel_uncore_forcewake_domain *d)
104 {
105 	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
106 }
107 
108 static void
109 fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
110 {
111 	struct intel_uncore_forcewake_domain *d;
112 	unsigned int tmp;
113 
114 	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
115 
116 	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
117 		fw_domain_wait_ack_clear(i915, d);
118 		fw_domain_get(i915, d);
119 	}
120 
121 	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
122 		fw_domain_wait_ack(i915, d);
123 
124 	i915->uncore.fw_domains_active |= fw_domains;
125 }
126 
127 static void
128 fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
129 {
130 	struct intel_uncore_forcewake_domain *d;
131 	unsigned int tmp;
132 
133 	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
134 
135 	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
136 		fw_domain_put(i915, d);
137 
138 	i915->uncore.fw_domains_active &= ~fw_domains;
139 }
140 
141 static void
142 fw_domains_reset(struct drm_i915_private *i915,
143 		 enum forcewake_domains fw_domains)
144 {
145 	struct intel_uncore_forcewake_domain *d;
146 	unsigned int tmp;
147 
148 	if (!fw_domains)
149 		return;
150 
151 	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
152 
153 	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
154 		fw_domain_reset(i915, d);
155 }
156 
157 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
158 {
159 	/* w/a for a sporadic read returning 0 by waiting for the GT
160 	 * thread to wake up.
161 	 */
162 	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
163 				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
164 		DRM_ERROR("GT thread status wait timed out\n");
165 }
166 
167 static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
168 					      enum forcewake_domains fw_domains)
169 {
170 	fw_domains_get(dev_priv, fw_domains);
171 
172 	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
173 	__gen6_gt_wait_for_thread_c0(dev_priv);
174 }
175 
176 static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
177 {
178 	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
179 
180 	return count & GT_FIFO_FREE_ENTRIES_MASK;
181 }
182 
183 static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
184 {
185 	u32 n;
186 
187 	/* On VLV, FIFO will be shared by both SW and HW.
188 	 * So, we need to read the FREE_ENTRIES every time */
189 	if (IS_VALLEYVIEW(dev_priv))
190 		n = fifo_free_entries(dev_priv);
191 	else
192 		n = dev_priv->uncore.fifo_count;
193 
194 	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
195 		if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
196 				    GT_FIFO_NUM_RESERVED_ENTRIES,
197 				    GT_FIFO_TIMEOUT_MS)) {
198 			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
199 			return;
200 		}
201 	}
202 
203 	dev_priv->uncore.fifo_count = n - 1;
204 }
205 
206 static enum hrtimer_restart
207 intel_uncore_fw_release_timer(struct hrtimer *timer)
208 {
209 	struct intel_uncore_forcewake_domain *domain =
210 	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
211 	struct drm_i915_private *dev_priv =
212 		container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
213 	unsigned long irqflags;
214 
215 	assert_rpm_device_not_suspended(dev_priv);
216 
217 	if (xchg(&domain->active, false))
218 		return HRTIMER_RESTART;
219 
220 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
221 	if (WARN_ON(domain->wake_count == 0))
222 		domain->wake_count++;
223 
224 	if (--domain->wake_count == 0)
225 		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
226 
227 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
228 
229 	return HRTIMER_NORESTART;
230 }
231 
232 static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
233 					 bool restore)
234 {
235 	unsigned long irqflags;
236 	struct intel_uncore_forcewake_domain *domain;
237 	int retry_count = 100;
238 	enum forcewake_domains fw, active_domains;
239 
240 	/* Hold uncore.lock across reset to prevent any register access
241 	 * with forcewake not set correctly. Wait until all pending
242 	 * timers are run before holding.
243 	 */
244 	while (1) {
245 		unsigned int tmp;
246 
247 		active_domains = 0;
248 
249 		for_each_fw_domain(domain, dev_priv, tmp) {
250 			smp_store_mb(domain->active, false);
251 			if (hrtimer_cancel(&domain->timer) == 0)
252 				continue;
253 
254 			intel_uncore_fw_release_timer(&domain->timer);
255 		}
256 
257 		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
258 
259 		for_each_fw_domain(domain, dev_priv, tmp) {
260 			if (hrtimer_active(&domain->timer))
261 				active_domains |= domain->mask;
262 		}
263 
264 		if (active_domains == 0)
265 			break;
266 
267 		if (--retry_count == 0) {
268 			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
269 			break;
270 		}
271 
272 		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
273 		cond_resched();
274 	}
275 
276 	WARN_ON(active_domains);
277 
278 	fw = dev_priv->uncore.fw_domains_active;
279 	if (fw)
280 		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
281 
282 	fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
283 
284 	if (restore) { /* If reset with a user forcewake, try to restore */
285 		if (fw)
286 			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
287 
288 		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
289 			dev_priv->uncore.fifo_count =
290 				fifo_free_entries(dev_priv);
291 	}
292 
293 	if (!restore)
294 		assert_forcewakes_inactive(dev_priv);
295 
296 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
297 }
298 
299 static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
300 {
301 	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
302 	const unsigned int sets[4] = { 1, 1, 2, 2 };
303 	const u32 cap = dev_priv->edram_cap;
304 
305 	return EDRAM_NUM_BANKS(cap) *
306 		ways[EDRAM_WAYS_IDX(cap)] *
307 		sets[EDRAM_SETS_IDX(cap)] *
308 		1024 * 1024;
309 }
310 
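/*
 * Illustrative example (hypothetical cap decode, not taken from hardware
 * documentation): a cap value with 2 banks, ways index 3 (16 ways) and
 * sets index 2 (2 sets) would give 2 * 16 * 2 * 1024 * 1024 = 64MB of eDRAM.
 */
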
311 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
312 {
313 	if (!HAS_EDRAM(dev_priv))
314 		return 0;
315 
316 	/* The needed capability bits for size calculation
317 	 * are not there on pre-gen9, so always return 128MB.
318 	 */
319 	if (INTEL_GEN(dev_priv) < 9)
320 		return 128 * 1024 * 1024;
321 
322 	return gen9_edram_size(dev_priv);
323 }
324 
325 static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
326 {
327 	if (IS_HASWELL(dev_priv) ||
328 	    IS_BROADWELL(dev_priv) ||
329 	    INTEL_GEN(dev_priv) >= 9) {
330 		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
331 							HSW_EDRAM_CAP);
332 
333 		/* NB: We can't write IDICR yet because we do not have gt funcs
334 		 * set up */
335 	} else {
336 		dev_priv->edram_cap = 0;
337 	}
338 
339 	if (HAS_EDRAM(dev_priv))
340 		DRM_INFO("Found %lluMB of eDRAM\n",
341 			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
342 }
343 
344 static bool
345 fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
346 {
347 	u32 dbg;
348 
349 	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
350 	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
351 		return false;
352 
353 	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
354 
355 	return true;
356 }
357 
358 static bool
359 vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
360 {
361 	u32 cer;
362 
363 	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
364 	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
365 		return false;
366 
367 	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
368 
369 	return true;
370 }
371 
372 static bool
373 gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
374 {
375 	u32 fifodbg;
376 
377 	fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
378 
379 	if (unlikely(fifodbg)) {
380 		DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
381 		__raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
382 	}
383 
384 	return fifodbg;
385 }
386 
387 static bool
388 check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
389 {
390 	bool ret = false;
391 
392 	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
393 		ret |= fpga_check_for_unclaimed_mmio(dev_priv);
394 
395 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
396 		ret |= vlv_check_for_unclaimed_mmio(dev_priv);
397 
398 	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
399 		ret |= gen6_check_for_fifo_debug(dev_priv);
400 
401 	return ret;
402 }
403 
404 static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
405 					  bool restore_forcewake)
406 {
407 	/* clear out unclaimed reg detection bit */
408 	if (check_for_unclaimed_mmio(dev_priv))
409 		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
410 
411 	/* WaDisableShadowRegForCpd:chv */
412 	if (IS_CHERRYVIEW(dev_priv)) {
413 		__raw_i915_write32(dev_priv, GTFIFOCTL,
414 				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
415 				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
416 				   GT_FIFO_CTL_RC6_POLICY_STALL);
417 	}
418 
419 	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
420 }
421 
422 void intel_uncore_suspend(struct drm_i915_private *dev_priv)
423 {
424 	iosf_mbi_unregister_pmic_bus_access_notifier(
425 		&dev_priv->uncore.pmic_bus_access_nb);
426 	intel_uncore_forcewake_reset(dev_priv, false);
427 }
428 
429 void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
430 {
431 	__intel_uncore_early_sanitize(dev_priv, true);
432 	iosf_mbi_register_pmic_bus_access_notifier(
433 		&dev_priv->uncore.pmic_bus_access_nb);
434 	i915_check_and_clear_faults(dev_priv);
435 }
436 
437 void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
438 {
439 	iosf_mbi_register_pmic_bus_access_notifier(
440 		&dev_priv->uncore.pmic_bus_access_nb);
441 }
442 
443 void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
444 {
445 	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
446 
447 	/* BIOS often leaves RC6 enabled, but disable it for hw init */
448 	intel_sanitize_gt_powersave(dev_priv);
449 }
450 
451 static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
452 					 enum forcewake_domains fw_domains)
453 {
454 	struct intel_uncore_forcewake_domain *domain;
455 	unsigned int tmp;
456 
457 	fw_domains &= dev_priv->uncore.fw_domains;
458 
459 	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
460 		if (domain->wake_count++) {
461 			fw_domains &= ~domain->mask;
462 			domain->active = true;
463 		}
464 	}
465 
466 	if (fw_domains)
467 		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
468 }
469 
470 /**
471  * intel_uncore_forcewake_get - grab forcewake domain references
472  * @dev_priv: i915 device instance
473  * @fw_domains: forcewake domains to get reference on
474  *
475  * This function can be used to get GT's forcewake domain references.
476  * Normal register access will handle the forcewake domains automatically.
477  * However, if some sequence requires the GT to not power down particular
478  * forcewake domains, this function should be called at the beginning of the
479  * sequence, and the reference should subsequently be dropped by a symmetric
480  * call to intel_uncore_forcewake_put(). Usually the caller wants all the
481  * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
482  */
483 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
484 				enum forcewake_domains fw_domains)
485 {
486 	unsigned long irqflags;
487 
488 	if (!dev_priv->uncore.funcs.force_wake_get)
489 		return;
490 
491 	assert_rpm_wakelock_held(dev_priv);
492 
493 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
494 	__intel_uncore_forcewake_get(dev_priv, fw_domains);
495 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
496 }
497 
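/*
 * Illustrative usage sketch, assuming a caller that performs several raw
 * (non-forcewake-aware) accesses and needs the render well kept awake for
 * the whole sequence:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	... I915_READ_FW()/I915_WRITE_FW() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 *
 * Callers that want every domain awake pass FORCEWAKE_ALL instead.
 */
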
498 /**
499  * intel_uncore_forcewake_get__locked - grab forcewake domain references
500  * @dev_priv: i915 device instance
501  * @fw_domains: forcewake domains to get reference on
502  *
503  * See intel_uncore_forcewake_get(). This variant places the onus
504  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
505  */
506 void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
507 					enum forcewake_domains fw_domains)
508 {
509 	lockdep_assert_held(&dev_priv->uncore.lock);
510 
511 	if (!dev_priv->uncore.funcs.force_wake_get)
512 		return;
513 
514 	__intel_uncore_forcewake_get(dev_priv, fw_domains);
515 }
516 
517 static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
518 					 enum forcewake_domains fw_domains)
519 {
520 	struct intel_uncore_forcewake_domain *domain;
521 	unsigned int tmp;
522 
523 	fw_domains &= dev_priv->uncore.fw_domains;
524 
525 	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
526 		if (WARN_ON(domain->wake_count == 0))
527 			continue;
528 
529 		if (--domain->wake_count) {
530 			domain->active = true;
531 			continue;
532 		}
533 
534 		fw_domain_arm_timer(domain);
535 	}
536 }
537 
538 /**
539  * intel_uncore_forcewake_put - release a forcewake domain reference
540  * @dev_priv: i915 device instance
541  * @fw_domains: forcewake domains to put references
542  *
543  * This function drops the device-level forcewakes for specified
544  * domains obtained by intel_uncore_forcewake_get().
545  */
546 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
547 				enum forcewake_domains fw_domains)
548 {
549 	unsigned long irqflags;
550 
551 	if (!dev_priv->uncore.funcs.force_wake_put)
552 		return;
553 
554 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
555 	__intel_uncore_forcewake_put(dev_priv, fw_domains);
556 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
557 }
558 
559 /**
560  * intel_uncore_forcewake_put__locked - release forcewake domain references
561  * @dev_priv: i915 device instance
562  * @fw_domains: forcewake domains to put references on
563  *
564  * See intel_uncore_forcewake_put(). This variant places the onus
565  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
566  */
567 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
568 					enum forcewake_domains fw_domains)
569 {
570 	lockdep_assert_held(&dev_priv->uncore.lock);
571 
572 	if (!dev_priv->uncore.funcs.force_wake_put)
573 		return;
574 
575 	__intel_uncore_forcewake_put(dev_priv, fw_domains);
576 }
577 
578 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
579 {
580 	if (!dev_priv->uncore.funcs.force_wake_get)
581 		return;
582 
583 	WARN_ON(dev_priv->uncore.fw_domains_active);
584 }
585 
586 /* We give fast paths for the really cool registers */
587 #define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
588 
589 #define __gen6_reg_read_fw_domains(offset) \
590 ({ \
591 	enum forcewake_domains __fwd; \
592 	if (NEEDS_FORCE_WAKE(offset)) \
593 		__fwd = FORCEWAKE_RENDER; \
594 	else \
595 		__fwd = 0; \
596 	__fwd; \
597 })
598 
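/*
 * For example, on gen6 a read of a GT register such as 0x2030 (below the
 * 0x40000 boundary above) resolves to FORCEWAKE_RENDER, while display
 * registers at 0x70000 and above resolve to 0 (no forcewake needed).
 */
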
599 static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
600 {
601 	if (offset < entry->start)
602 		return -1;
603 	else if (offset > entry->end)
604 		return 1;
605 	else
606 		return 0;
607 }
608 
609 /* Copied and "macroized" from lib/bsearch.c */
610 #define BSEARCH(key, base, num, cmp) ({                                 \
611 	unsigned int start__ = 0, end__ = (num);                        \
612 	typeof(base) result__ = NULL;                                   \
613 	while (start__ < end__) {                                       \
614 		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
615 		int ret__ = (cmp)((key), (base) + mid__);               \
616 		if (ret__ < 0) {                                        \
617 			end__ = mid__;                                  \
618 		} else if (ret__ > 0) {                                 \
619 			start__ = mid__ + 1;                            \
620 		} else {                                                \
621 			result__ = (base) + mid__;                      \
622 			break;                                          \
623 		}                                                       \
624 	}                                                               \
625 	result__;                                                       \
626 })
627 
628 static enum forcewake_domains
629 find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
630 {
631 	const struct intel_forcewake_range *entry;
632 
633 	entry = BSEARCH(offset,
634 			dev_priv->uncore.fw_domains_table,
635 			dev_priv->uncore.fw_domains_table_entries,
636 			fw_range_cmp);
637 
638 	if (!entry)
639 		return 0;
640 
641 	WARN(entry->domains & ~dev_priv->uncore.fw_domains,
642 	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
643 	     entry->domains & ~dev_priv->uncore.fw_domains, offset);
644 
645 	return entry->domains;
646 }
647 
648 #define GEN_FW_RANGE(s, e, d) \
649 	{ .start = (s), .end = (e), .domains = (d) }
650 
651 #define HAS_FWTABLE(dev_priv) \
652 	(INTEL_GEN(dev_priv) >= 9 || \
653 	 IS_CHERRYVIEW(dev_priv) || \
654 	 IS_VALLEYVIEW(dev_priv))
655 
656 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
657 static const struct intel_forcewake_range __vlv_fw_ranges[] = {
658 	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
659 	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
660 	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
661 	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
662 	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
663 	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
664 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
665 };
666 
667 #define __fwtable_reg_read_fw_domains(offset) \
668 ({ \
669 	enum forcewake_domains __fwd = 0; \
670 	if (NEEDS_FORCE_WAKE((offset))) \
671 		__fwd = find_fw_domain(dev_priv, offset); \
672 	__fwd; \
673 })
674 
675 /* *Must* be sorted by offset! See intel_shadow_table_check(). */
676 static const i915_reg_t gen8_shadowed_regs[] = {
677 	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
678 	GEN6_RPNSWREQ,			/* 0xA008 */
679 	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
680 	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
681 	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
682 	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
683 	/* TODO: Other registers are not yet used */
684 };
685 
686 static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
687 {
688 	u32 offset = i915_mmio_reg_offset(*reg);
689 
690 	if (key < offset)
691 		return -1;
692 	else if (key > offset)
693 		return 1;
694 	else
695 		return 0;
696 }
697 
698 static bool is_gen8_shadowed(u32 offset)
699 {
700 	const i915_reg_t *regs = gen8_shadowed_regs;
701 
702 	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
703 		       mmio_reg_cmp);
704 }
705 
706 #define __gen8_reg_write_fw_domains(offset) \
707 ({ \
708 	enum forcewake_domains __fwd; \
709 	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
710 		__fwd = FORCEWAKE_RENDER; \
711 	else \
712 		__fwd = 0; \
713 	__fwd; \
714 })
715 
716 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
717 static const struct intel_forcewake_range __chv_fw_ranges[] = {
718 	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
719 	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
720 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
721 	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
722 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
723 	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
724 	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
725 	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
726 	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
727 	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
728 	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
729 	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
730 	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
731 	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
732 	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
733 	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
734 };
735 
736 #define __fwtable_reg_write_fw_domains(offset) \
737 ({ \
738 	enum forcewake_domains __fwd = 0; \
739 	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
740 		__fwd = find_fw_domain(dev_priv, offset); \
741 	__fwd; \
742 })
743 
744 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
745 static const struct intel_forcewake_range __gen9_fw_ranges[] = {
746 	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
747 	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
748 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
749 	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
750 	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
751 	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
752 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
753 	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
754 	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
755 	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
756 	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
757 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
758 	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
759 	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
760 	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
761 	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
762 	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
763 	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
764 	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
765 	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
766 	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
767 	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
768 	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
769 	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
770 	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
771 	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
772 	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
773 	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
774 	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
775 	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
776 	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
777 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
778 };
779 
780 static void
781 ilk_dummy_write(struct drm_i915_private *dev_priv)
782 {
783 	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
784 	 * the chip from rc6 before touching it for real. MI_MODE is masked,
785 	 * hence harmless to write 0 into. */
786 	__raw_i915_write32(dev_priv, MI_MODE, 0);
787 }
788 
789 static void
790 __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
791 		      const i915_reg_t reg,
792 		      const bool read,
793 		      const bool before)
794 {
795 	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
796 		 "Unclaimed %s register 0x%x\n",
797 		 read ? "read from" : "write to",
798 		 i915_mmio_reg_offset(reg)))
799 		i915.mmio_debug--; /* Only report the first N failures */
800 }
801 
802 static inline void
803 unclaimed_reg_debug(struct drm_i915_private *dev_priv,
804 		    const i915_reg_t reg,
805 		    const bool read,
806 		    const bool before)
807 {
808 	if (likely(!i915.mmio_debug))
809 		return;
810 
811 	__unclaimed_reg_debug(dev_priv, reg, read, before);
812 }
813 
814 #define GEN2_READ_HEADER(x) \
815 	u##x val = 0; \
816 	assert_rpm_wakelock_held(dev_priv);
817 
818 #define GEN2_READ_FOOTER \
819 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
820 	return val
821 
822 #define __gen2_read(x) \
823 static u##x \
824 gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
825 	GEN2_READ_HEADER(x); \
826 	val = __raw_i915_read##x(dev_priv, reg); \
827 	GEN2_READ_FOOTER; \
828 }
829 
830 #define __gen5_read(x) \
831 static u##x \
832 gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
833 	GEN2_READ_HEADER(x); \
834 	ilk_dummy_write(dev_priv); \
835 	val = __raw_i915_read##x(dev_priv, reg); \
836 	GEN2_READ_FOOTER; \
837 }
838 
839 __gen5_read(8)
840 __gen5_read(16)
841 __gen5_read(32)
842 __gen5_read(64)
843 __gen2_read(8)
844 __gen2_read(16)
845 __gen2_read(32)
846 __gen2_read(64)
847 
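/*
 * For reference, each invocation above emits one accessor; e.g.
 * __gen5_read(32) expands (roughly) to:
 *
 *	static u32 gen5_read32(struct drm_i915_private *dev_priv,
 *			       i915_reg_t reg, bool trace)
 *	{
 *		u32 val = 0;
 *		assert_rpm_wakelock_held(dev_priv);
 *		ilk_dummy_write(dev_priv);
 *		val = __raw_i915_read32(dev_priv, reg);
 *		trace_i915_reg_rw(false, reg, val, sizeof(val), trace);
 *		return val;
 *	}
 */
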
848 #undef __gen5_read
849 #undef __gen2_read
850 
851 #undef GEN2_READ_FOOTER
852 #undef GEN2_READ_HEADER
853 
854 #define GEN6_READ_HEADER(x) \
855 	u32 offset = i915_mmio_reg_offset(reg); \
856 	unsigned long irqflags; \
857 	u##x val = 0; \
858 	assert_rpm_wakelock_held(dev_priv); \
859 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
860 	unclaimed_reg_debug(dev_priv, reg, true, true)
861 
862 #define GEN6_READ_FOOTER \
863 	unclaimed_reg_debug(dev_priv, reg, true, false); \
864 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
865 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
866 	return val
867 
868 static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
869 					enum forcewake_domains fw_domains)
870 {
871 	struct intel_uncore_forcewake_domain *domain;
872 	unsigned int tmp;
873 
874 	GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);
875 
876 	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
877 		fw_domain_arm_timer(domain);
878 
879 	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
880 }
881 
882 static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
883 				     enum forcewake_domains fw_domains)
884 {
885 	if (WARN_ON(!fw_domains))
886 		return;
887 
888 	/* Turn on all requested but inactive supported forcewake domains. */
889 	fw_domains &= dev_priv->uncore.fw_domains;
890 	fw_domains &= ~dev_priv->uncore.fw_domains_active;
891 
892 	if (fw_domains)
893 		___force_wake_auto(dev_priv, fw_domains);
894 }
895 
896 #define __gen_read(func, x) \
897 static u##x \
898 func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
899 	enum forcewake_domains fw_engine; \
900 	GEN6_READ_HEADER(x); \
901 	fw_engine = __##func##_reg_read_fw_domains(offset); \
902 	if (fw_engine) \
903 		__force_wake_auto(dev_priv, fw_engine); \
904 	val = __raw_i915_read##x(dev_priv, reg); \
905 	GEN6_READ_FOOTER; \
906 }
907 #define __gen6_read(x) __gen_read(gen6, x)
908 #define __fwtable_read(x) __gen_read(fwtable, x)
909 
910 __fwtable_read(8)
911 __fwtable_read(16)
912 __fwtable_read(32)
913 __fwtable_read(64)
914 __gen6_read(8)
915 __gen6_read(16)
916 __gen6_read(32)
917 __gen6_read(64)
918 
919 #undef __fwtable_read
920 #undef __gen6_read
921 #undef GEN6_READ_FOOTER
922 #undef GEN6_READ_HEADER
923 
924 #define GEN2_WRITE_HEADER \
925 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
926 	assert_rpm_wakelock_held(dev_priv); \
927 
928 #define GEN2_WRITE_FOOTER
929 
930 #define __gen2_write(x) \
931 static void \
932 gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
933 	GEN2_WRITE_HEADER; \
934 	__raw_i915_write##x(dev_priv, reg, val); \
935 	GEN2_WRITE_FOOTER; \
936 }
937 
938 #define __gen5_write(x) \
939 static void \
940 gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
941 	GEN2_WRITE_HEADER; \
942 	ilk_dummy_write(dev_priv); \
943 	__raw_i915_write##x(dev_priv, reg, val); \
944 	GEN2_WRITE_FOOTER; \
945 }
946 
947 __gen5_write(8)
948 __gen5_write(16)
949 __gen5_write(32)
950 __gen2_write(8)
951 __gen2_write(16)
952 __gen2_write(32)
953 
954 #undef __gen5_write
955 #undef __gen2_write
956 
957 #undef GEN2_WRITE_FOOTER
958 #undef GEN2_WRITE_HEADER
959 
960 #define GEN6_WRITE_HEADER \
961 	u32 offset = i915_mmio_reg_offset(reg); \
962 	unsigned long irqflags; \
963 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
964 	assert_rpm_wakelock_held(dev_priv); \
965 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
966 	unclaimed_reg_debug(dev_priv, reg, false, true)
967 
968 #define GEN6_WRITE_FOOTER \
969 	unclaimed_reg_debug(dev_priv, reg, false, false); \
970 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
971 
972 #define __gen6_write(x) \
973 static void \
974 gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
975 	GEN6_WRITE_HEADER; \
976 	if (NEEDS_FORCE_WAKE(offset)) \
977 		__gen6_gt_wait_for_fifo(dev_priv); \
978 	__raw_i915_write##x(dev_priv, reg, val); \
979 	GEN6_WRITE_FOOTER; \
980 }
981 
982 #define __gen_write(func, x) \
983 static void \
984 func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
985 	enum forcewake_domains fw_engine; \
986 	GEN6_WRITE_HEADER; \
987 	fw_engine = __##func##_reg_write_fw_domains(offset); \
988 	if (fw_engine) \
989 		__force_wake_auto(dev_priv, fw_engine); \
990 	__raw_i915_write##x(dev_priv, reg, val); \
991 	GEN6_WRITE_FOOTER; \
992 }
993 #define __gen8_write(x) __gen_write(gen8, x)
994 #define __fwtable_write(x) __gen_write(fwtable, x)
995 
996 __fwtable_write(8)
997 __fwtable_write(16)
998 __fwtable_write(32)
999 __gen8_write(8)
1000 __gen8_write(16)
1001 __gen8_write(32)
1002 __gen6_write(8)
1003 __gen6_write(16)
1004 __gen6_write(32)
1005 
1006 #undef __fwtable_write
1007 #undef __gen8_write
1008 #undef __gen6_write
1009 #undef GEN6_WRITE_FOOTER
1010 #undef GEN6_WRITE_HEADER
1011 
1012 #define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
1013 do { \
1014 	(i915)->uncore.funcs.mmio_writeb = x##_write8; \
1015 	(i915)->uncore.funcs.mmio_writew = x##_write16; \
1016 	(i915)->uncore.funcs.mmio_writel = x##_write32; \
1017 } while (0)
1018 
1019 #define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
1020 do { \
1021 	(i915)->uncore.funcs.mmio_readb = x##_read8; \
1022 	(i915)->uncore.funcs.mmio_readw = x##_read16; \
1023 	(i915)->uncore.funcs.mmio_readl = x##_read32; \
1024 	(i915)->uncore.funcs.mmio_readq = x##_read64; \
1025 } while (0)
1026 
1027 
1028 static void fw_domain_init(struct drm_i915_private *dev_priv,
1029 			   enum forcewake_domain_id domain_id,
1030 			   i915_reg_t reg_set,
1031 			   i915_reg_t reg_ack)
1032 {
1033 	struct intel_uncore_forcewake_domain *d;
1034 
1035 	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
1036 		return;
1037 
1038 	d = &dev_priv->uncore.fw_domain[domain_id];
1039 
1040 	WARN_ON(d->wake_count);
1041 
1042 	WARN_ON(!i915_mmio_reg_valid(reg_set));
1043 	WARN_ON(!i915_mmio_reg_valid(reg_ack));
1044 
1045 	d->wake_count = 0;
1046 	d->reg_set = reg_set;
1047 	d->reg_ack = reg_ack;
1048 
1049 	d->id = domain_id;
1050 
1051 	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
1052 	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
1053 	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
1054 
1055 	d->mask = BIT(domain_id);
1056 
1057 	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1058 	d->timer.function = intel_uncore_fw_release_timer;
1059 
1060 	dev_priv->uncore.fw_domains |= BIT(domain_id);
1061 
1062 	fw_domain_reset(dev_priv, d);
1063 }
1064 
1065 static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
1066 {
1067 	if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
1068 		return;
1069 
1070 	if (IS_GEN6(dev_priv)) {
1071 		dev_priv->uncore.fw_reset = 0;
1072 		dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
1073 		dev_priv->uncore.fw_clear = 0;
1074 	} else {
1075 		/* WaRsClearFWBitsAtReset:bdw,skl */
1076 		dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
1077 		dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
1078 		dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
1079 	}
1080 
1081 	if (INTEL_GEN(dev_priv) >= 9) {
1082 		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1083 		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1084 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1085 			       FORCEWAKE_RENDER_GEN9,
1086 			       FORCEWAKE_ACK_RENDER_GEN9);
1087 		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
1088 			       FORCEWAKE_BLITTER_GEN9,
1089 			       FORCEWAKE_ACK_BLITTER_GEN9);
1090 		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1091 			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
1092 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1093 		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1094 		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1095 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1096 			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
1097 		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1098 			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
1099 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
1100 		dev_priv->uncore.funcs.force_wake_get =
1101 			fw_domains_get_with_thread_status;
1102 		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1103 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1104 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
1105 	} else if (IS_IVYBRIDGE(dev_priv)) {
1106 		u32 ecobus;
1107 
1108 		/* IVB configs may use multi-threaded forcewake */
1109 
1110 		/* A small trick here - if the bios hasn't configured
1111 		 * MT forcewake, and if the device is in RC6, then
1112 		 * force_wake_mt_get will not wake the device and the
1113 		 * ECOBUS read will return zero, which will be
1114 		 * (correctly) interpreted by the test below as MT
1115 		 * forcewake being disabled.
1116 		 */
1117 		dev_priv->uncore.funcs.force_wake_get =
1118 			fw_domains_get_with_thread_status;
1119 		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1120 
1121 		/* We need to init first for ECOBUS access and then
1122 		 * determine later if we want to reinit, in case MT access is
1123 		 * not working. At this stage we don't know which flavour this
1124 		 * ivb is, so it is better to also reset the gen6 fw registers
1125 		 * before the ecobus check.
1126 		 */
1127 
1128 		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
1129 		__raw_posting_read(dev_priv, ECOBUS);
1130 
1131 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1132 			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1133 
1134 		spin_lock_irq(&dev_priv->uncore.lock);
1135 		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
1136 		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
1137 		fw_domains_put(dev_priv, FORCEWAKE_RENDER);
1138 		spin_unlock_irq(&dev_priv->uncore.lock);
1139 
1140 		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
1141 			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
1142 			DRM_INFO("when using vblank-synced partial screen updates.\n");
1143 			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1144 				       FORCEWAKE, FORCEWAKE_ACK);
1145 		}
1146 	} else if (IS_GEN6(dev_priv)) {
1147 		dev_priv->uncore.funcs.force_wake_get =
1148 			fw_domains_get_with_thread_status;
1149 		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1150 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1151 			       FORCEWAKE, FORCEWAKE_ACK);
1152 	}
1153 
1154 	/* All future platforms are expected to require complex power gating */
1155 	WARN_ON(dev_priv->uncore.fw_domains == 0);
1156 }
1157 
1158 #define ASSIGN_FW_DOMAINS_TABLE(d) \
1159 { \
1160 	dev_priv->uncore.fw_domains_table = \
1161 			(struct intel_forcewake_range *)(d); \
1162 	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
1163 }
1164 
1165 static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
1166 					 unsigned long action, void *data)
1167 {
1168 	struct drm_i915_private *dev_priv = container_of(nb,
1169 			struct drm_i915_private, uncore.pmic_bus_access_nb);
1170 
1171 	switch (action) {
1172 	case MBI_PMIC_BUS_ACCESS_BEGIN:
1173 		/*
1174 		 * forcewake all now to make sure that we don't need to do a
1175 		 * forcewake later which on systems where this notifier gets
1176 		 * called requires the punit to access the shared pmic i2c
1177 		 * bus, which will be busy after this notification, leading to:
1178 		 * "render: timed out waiting for forcewake ack request."
1179 		 * errors.
1180 		 *
1181 		 * The notifier is unregistered during intel_runtime_suspend(),
1182 		 * so it's ok to access the HW here without holding a RPM
1183 		 * wake reference -> disable wakeref asserts for the time of
1184 		 * the access.
1185 		 */
1186 		disable_rpm_wakeref_asserts(dev_priv);
1187 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1188 		enable_rpm_wakeref_asserts(dev_priv);
1189 		break;
1190 	case MBI_PMIC_BUS_ACCESS_END:
1191 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1192 		break;
1193 	}
1194 
1195 	return NOTIFY_OK;
1196 }
1197 
1198 void intel_uncore_init(struct drm_i915_private *dev_priv)
1199 {
1200 	i915_check_vgpu(dev_priv);
1201 
1202 	intel_uncore_edram_detect(dev_priv);
1203 	intel_uncore_fw_domains_init(dev_priv);
1204 	__intel_uncore_early_sanitize(dev_priv, false);
1205 
1206 	dev_priv->uncore.unclaimed_mmio_check = 1;
1207 	dev_priv->uncore.pmic_bus_access_nb.notifier_call =
1208 		i915_pmic_bus_access_notifier;
1209 
1210 	if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
1211 		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
1212 		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
1213 	} else if (IS_GEN5(dev_priv)) {
1214 		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
1215 		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
1216 	} else if (IS_GEN(dev_priv, 6, 7)) {
1217 		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);
1218 
1219 		if (IS_VALLEYVIEW(dev_priv)) {
1220 			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
1221 			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
1222 		} else {
1223 			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
1224 		}
1225 	} else if (IS_GEN8(dev_priv)) {
1226 		if (IS_CHERRYVIEW(dev_priv)) {
1227 			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
1228 			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
1229 			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
1230 
1231 		} else {
1232 			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
1233 			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
1234 		}
1235 	} else {
1236 		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
1237 		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
1238 		ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
1239 	}
1240 
1241 	iosf_mbi_register_pmic_bus_access_notifier(
1242 		&dev_priv->uncore.pmic_bus_access_nb);
1243 
1244 	i915_check_and_clear_faults(dev_priv);
1245 }
1246 
1247 void intel_uncore_fini(struct drm_i915_private *dev_priv)
1248 {
1249 	iosf_mbi_unregister_pmic_bus_access_notifier(
1250 		&dev_priv->uncore.pmic_bus_access_nb);
1251 
1252 	/* Paranoia: make sure we have disabled everything before we exit. */
1253 	intel_uncore_sanitize(dev_priv);
1254 	intel_uncore_forcewake_reset(dev_priv, false);
1255 }
1256 
1257 #define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
1258 
1259 static const struct register_whitelist {
1260 	i915_reg_t offset_ldw, offset_udw;
1261 	uint32_t size;
1262 	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
1263 	uint32_t gen_bitmask;
1264 } whitelist[] = {
1265 	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
1266 	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
1267 	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
1268 };
1269 
1270 int i915_reg_read_ioctl(struct drm_device *dev,
1271 			void *data, struct drm_file *file)
1272 {
1273 	struct drm_i915_private *dev_priv = to_i915(dev);
1274 	struct drm_i915_reg_read *reg = data;
1275 	struct register_whitelist const *entry = whitelist;
1276 	unsigned size;
1277 	i915_reg_t offset_ldw, offset_udw;
1278 	int i, ret = 0;
1279 
1280 	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1281 		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
1282 		    (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
1283 			break;
1284 	}
1285 
1286 	if (i == ARRAY_SIZE(whitelist))
1287 		return -EINVAL;
1288 
1289 	/* We use the low bits to encode extra flags as the register should
1290 	 * be naturally aligned (and those that are not so aligned merely
1291 	 * limit the available flags for that register).
1292 	 */
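	/*
	 * For instance, with the 8-byte RING_TIMESTAMP entry above, passing
	 * the register offset itself keeps size == 8 and takes the plain
	 * 64-bit read below, while passing offset | 1 yields size == (8 | 1)
	 * and takes the 2x32 read path.
	 */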
1293 	offset_ldw = entry->offset_ldw;
1294 	offset_udw = entry->offset_udw;
1295 	size = entry->size;
1296 	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
1297 
1298 	intel_runtime_pm_get(dev_priv);
1299 
1300 	switch (size) {
1301 	case 8 | 1:
1302 		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
1303 		break;
1304 	case 8:
1305 		reg->val = I915_READ64(offset_ldw);
1306 		break;
1307 	case 4:
1308 		reg->val = I915_READ(offset_ldw);
1309 		break;
1310 	case 2:
1311 		reg->val = I915_READ16(offset_ldw);
1312 		break;
1313 	case 1:
1314 		reg->val = I915_READ8(offset_ldw);
1315 		break;
1316 	default:
1317 		ret = -EINVAL;
1318 		goto out;
1319 	}
1320 
1321 out:
1322 	intel_runtime_pm_put(dev_priv);
1323 	return ret;
1324 }
1325 
1326 static void gen3_stop_rings(struct drm_i915_private *dev_priv)
1327 {
1328 	struct intel_engine_cs *engine;
1329 	enum intel_engine_id id;
1330 
1331 	for_each_engine(engine, dev_priv, id) {
1332 		const u32 base = engine->mmio_base;
1333 		const i915_reg_t mode = RING_MI_MODE(base);
1334 
1335 		I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
1336 		if (intel_wait_for_register_fw(dev_priv,
1337 					       mode,
1338 					       MODE_IDLE,
1339 					       MODE_IDLE,
1340 					       500))
1341 			DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
1342 					 engine->name);
1343 
1344 		I915_WRITE_FW(RING_CTL(base), 0);
1345 		I915_WRITE_FW(RING_HEAD(base), 0);
1346 		I915_WRITE_FW(RING_TAIL(base), 0);
1347 
1348 		/* Check acts as a post */
1349 		if (I915_READ_FW(RING_HEAD(base)) != 0)
1350 			DRM_DEBUG_DRIVER("%s: ring head not parked\n",
1351 					 engine->name);
1352 	}
1353 }
1354 
1355 static bool i915_reset_complete(struct pci_dev *pdev)
1356 {
1357 	u8 gdrst;
1358 
1359 	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1360 	return (gdrst & GRDOM_RESET_STATUS) == 0;
1361 }
1362 
1363 static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1364 {
1365 	struct pci_dev *pdev = dev_priv->drm.pdev;
1366 
1367 	/* assert reset for at least 20 usec */
1368 	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1369 	usleep_range(50, 200);
1370 	pci_write_config_byte(pdev, I915_GDRST, 0);
1371 
1372 	return wait_for(i915_reset_complete(pdev), 500);
1373 }
1374 
1375 static bool g4x_reset_complete(struct pci_dev *pdev)
1376 {
1377 	u8 gdrst;
1378 
1379 	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1380 	return (gdrst & GRDOM_RESET_ENABLE) == 0;
1381 }
1382 
1383 static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1384 {
1385 	struct pci_dev *pdev = dev_priv->drm.pdev;
1386 
1387 	/* Stop engines before we reset; see g4x_do_reset() below for why. */
1388 	gen3_stop_rings(dev_priv);
1389 
1390 	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1391 	return wait_for(g4x_reset_complete(pdev), 500);
1392 }
1393 
1394 static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1395 {
1396 	struct pci_dev *pdev = dev_priv->drm.pdev;
1397 	int ret;
1398 
1399 	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
1400 	I915_WRITE(VDECCLK_GATE_D,
1401 		   I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1402 	POSTING_READ(VDECCLK_GATE_D);
1403 
1404 	/* We stop engines, otherwise we might get failed reset and a
1405 	 * dead gpu (on elk).
1406 	 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
1407 	 */
1408 	gen3_stop_rings(dev_priv);
1409 
1410 	pci_write_config_byte(pdev, I915_GDRST,
1411 			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
1412 	ret =  wait_for(g4x_reset_complete(pdev), 500);
1413 	if (ret) {
1414 		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
1415 		goto out;
1416 	}
1417 
1418 	pci_write_config_byte(pdev, I915_GDRST,
1419 			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
1420 	ret =  wait_for(g4x_reset_complete(pdev), 500);
1421 	if (ret) {
1422 		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
1423 		goto out;
1424 	}
1425 
1426 out:
1427 	pci_write_config_byte(pdev, I915_GDRST, 0);
1428 
1429 	I915_WRITE(VDECCLK_GATE_D,
1430 		   I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1431 	POSTING_READ(VDECCLK_GATE_D);
1432 
1433 	return ret;
1434 }
1435 
1436 static int ironlake_do_reset(struct drm_i915_private *dev_priv,
1437 			     unsigned engine_mask)
1438 {
1439 	int ret;
1440 
1441 	I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
1442 	ret = intel_wait_for_register(dev_priv,
1443 				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1444 				      500);
1445 	if (ret) {
1446 		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
1447 		goto out;
1448 	}
1449 
1450 	I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
1451 	ret = intel_wait_for_register(dev_priv,
1452 				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1453 				      500);
1454 	if (ret) {
1455 		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
1456 		goto out;
1457 	}
1458 
1459 out:
1460 	I915_WRITE(ILK_GDSR, 0);
1461 	POSTING_READ(ILK_GDSR);
1462 	return ret;
1463 }
1464 
1465 /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
1466 static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1467 				u32 hw_domain_mask)
1468 {
1469 	int err;
1470 
1471 	/* GEN6_GDRST is not in the gt power well, no need to check
1472 	 * for fifo space for the write or forcewake the chip for
1473 	 * the read
1474 	 */
1475 	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
1476 
1477 	/* Wait for the device to ack the reset requests */
1478 	err = intel_wait_for_register_fw(dev_priv,
1479 					  GEN6_GDRST, hw_domain_mask, 0,
1480 					  500);
1481 	if (err)
1482 		DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
1483 				 hw_domain_mask);
1484 
1485 	return err;
1486 }
1487 
1488 /**
1489  * gen6_reset_engines - reset individual engines
1490  * @dev_priv: i915 device
1491  * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
1492  *
1493  * This function will reset the individual engines that are set in engine_mask.
1494  * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
1495  *
1496  * Note: It is the responsibility of the caller to handle the difference between
1497  * asking for a full domain reset versus a reset of all available individual engines.
1498  *
1499  * Returns 0 on success, nonzero on error.
1500  */
1501 static int gen6_reset_engines(struct drm_i915_private *dev_priv,
1502 			      unsigned engine_mask)
1503 {
1504 	struct intel_engine_cs *engine;
1505 	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
1506 		[RCS] = GEN6_GRDOM_RENDER,
1507 		[BCS] = GEN6_GRDOM_BLT,
1508 		[VCS] = GEN6_GRDOM_MEDIA,
1509 		[VCS2] = GEN8_GRDOM_MEDIA2,
1510 		[VECS] = GEN6_GRDOM_VECS,
1511 	};
1512 	u32 hw_mask;
1513 
1514 	if (engine_mask == ALL_ENGINES) {
1515 		hw_mask = GEN6_GRDOM_FULL;
1516 	} else {
1517 		unsigned int tmp;
1518 
1519 		hw_mask = 0;
1520 		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
1521 			hw_mask |= hw_engine_mask[engine->id];
1522 	}
1523 
1524 	return gen6_hw_domain_reset(dev_priv, hw_mask);
1525 }
1526 
1527 /**
1528  * __intel_wait_for_register_fw - wait until register matches expected state
1529  * @dev_priv: the i915 device
1530  * @reg: the register to read
1531  * @mask: mask to apply to register value
1532  * @value: expected value
1533  * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
1534  * @slow_timeout_ms: slow timeout in millisecond
1535  * @out_value: optional placeholder to hold register value
1536  *
1537  * This routine waits until the target register @reg contains the expected
1538  * @value after applying the @mask, i.e. it waits until ::
1539  *
1540  *     (I915_READ_FW(reg) & mask) == value
1541  *
1542  * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
1543  * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
1544  * must not be larger than 20,000 microseconds.
1545  *
1546  * Note that this routine assumes the caller holds forcewake asserted, it is
1547  * not suitable for very long waits. See intel_wait_for_register() if you
1548  * wish to wait without holding forcewake for the duration (i.e. you expect
1549  * the wait to be slow).
1550  *
1551  * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
1552  */
1553 int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
1554 				 i915_reg_t reg,
1555 				 u32 mask,
1556 				 u32 value,
1557 				 unsigned int fast_timeout_us,
1558 				 unsigned int slow_timeout_ms,
1559 				 u32 *out_value)
1560 {
1561 	u32 uninitialized_var(reg_value);
1562 #define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
1563 	int ret;
1564 
1565 	/* Catch any overuse of this function */
1566 	might_sleep_if(slow_timeout_ms);
1567 	GEM_BUG_ON(fast_timeout_us > 20000);
1568 
1569 	ret = -ETIMEDOUT;
1570 	if (fast_timeout_us && fast_timeout_us <= 20000)
1571 		ret = _wait_for_atomic(done, fast_timeout_us, 0);
1572 	if (ret && slow_timeout_ms)
1573 		ret = wait_for(done, slow_timeout_ms);
1574 
1575 	if (out_value)
1576 		*out_value = reg_value;
1577 
1578 	return ret;
1579 #undef done
1580 }
1581 
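/*
 * Illustrative sketch only (EXAMPLE_STATUS_REG and EXAMPLE_DONE_BIT are
 * made-up names): with forcewake already held, the caller polls atomically
 * for up to 10us and, because a non-zero slow timeout is given, may then
 * sleep for up to 5ms:
 *
 *	u32 val;
 *	int err;
 *
 *	err = __intel_wait_for_register_fw(dev_priv, EXAMPLE_STATUS_REG,
 *					   EXAMPLE_DONE_BIT, EXAMPLE_DONE_BIT,
 *					   10, 5, &val);
 *	if (err == -ETIMEDOUT)
 *		DRM_DEBUG_DRIVER("status stuck at 0x%08x\n", val);
 */
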
1582 /**
1583  * intel_wait_for_register - wait until register matches expected state
1584  * @dev_priv: the i915 device
1585  * @reg: the register to read
1586  * @mask: mask to apply to register value
1587  * @value: expected value
1588  * @timeout_ms: timeout in milliseconds
1589  *
1590  * This routine waits until the target register @reg contains the expected
1591  * @value after applying the @mask, i.e. it waits until ::
1592  *
1593  *     (I915_READ(reg) & mask) == value
1594  *
1595  * Otherwise, the wait will time out after @timeout_ms milliseconds.
1596  *
1597  * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
1598  */
1599 int intel_wait_for_register(struct drm_i915_private *dev_priv,
1600 			    i915_reg_t reg,
1601 			    u32 mask,
1602 			    u32 value,
1603 			    unsigned int timeout_ms)
1604 {
1605 	unsigned fw =
1606 		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
1607 	int ret;
1608 
1609 	might_sleep();
1610 
1611 	spin_lock_irq(&dev_priv->uncore.lock);
1612 	intel_uncore_forcewake_get__locked(dev_priv, fw);
1613 
1614 	ret = __intel_wait_for_register_fw(dev_priv,
1615 					   reg, mask, value,
1616 					   2, 0, NULL);
1617 
1618 	intel_uncore_forcewake_put__locked(dev_priv, fw);
1619 	spin_unlock_irq(&dev_priv->uncore.lock);
1620 
1621 	if (ret)
1622 		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
1623 			       timeout_ms);
1624 
1625 	return ret;
1626 }
1627 
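/*
 * Illustrative sketch only (EXAMPLE_STATUS_REG and EXAMPLE_DONE_BIT are
 * made-up names): the sleeping variant acquires and releases the required
 * forcewake domains itself, so a caller simply does
 *
 *	if (intel_wait_for_register(dev_priv, EXAMPLE_STATUS_REG,
 *				    EXAMPLE_DONE_BIT, EXAMPLE_DONE_BIT, 50))
 *		DRM_DEBUG_DRIVER("example status bit never asserted\n");
 *
 * The initial 2us poll runs under forcewake; the remainder of the 50ms wait
 * uses I915_READ_NOTRACE() without holding forcewake.
 */
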
1628 static int gen8_reset_engine_start(struct intel_engine_cs *engine)
1629 {
1630 	struct drm_i915_private *dev_priv = engine->i915;
1631 	int ret;
1632 
1633 	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1634 		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
1635 
1636 	ret = intel_wait_for_register_fw(dev_priv,
1637 					 RING_RESET_CTL(engine->mmio_base),
1638 					 RESET_CTL_READY_TO_RESET,
1639 					 RESET_CTL_READY_TO_RESET,
1640 					 700);
1641 	if (ret)
1642 		DRM_ERROR("%s: reset request timeout\n", engine->name);
1643 
1644 	return ret;
1645 }
1646 
1647 static void gen8_reset_engine_cancel(struct intel_engine_cs *engine)
1648 {
1649 	struct drm_i915_private *dev_priv = engine->i915;
1650 
1651 	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1652 		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
1653 }
1654 
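/*
 * Gen8+ resets are a two-step handshake: each engine in the mask is first
 * asked to quiesce via RESET_CTL_REQUEST_RESET and must report
 * RESET_CTL_READY_TO_RESET before the gen6-style domain reset is issued.
 * If any engine fails to ack, the pending requests are cancelled and the
 * reset is abandoned with -EIO.
 */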
1655 static int gen8_reset_engines(struct drm_i915_private *dev_priv,
1656 			      unsigned engine_mask)
1657 {
1658 	struct intel_engine_cs *engine;
1659 	unsigned int tmp;
1660 
1661 	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
1662 		if (gen8_reset_engine_start(engine))
1663 			goto not_ready;
1664 
1665 	return gen6_reset_engines(dev_priv, engine_mask);
1666 
1667 not_ready:
1668 	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
1669 		gen8_reset_engine_cancel(engine);
1670 
1671 	return -EIO;
1672 }
1673 
1674 typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
1675 
1676 static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
1677 {
1678 	if (!i915.reset)
1679 		return NULL;
1680 
1681 	if (INTEL_INFO(dev_priv)->gen >= 8)
1682 		return gen8_reset_engines;
1683 	else if (INTEL_INFO(dev_priv)->gen >= 6)
1684 		return gen6_reset_engines;
1685 	else if (IS_GEN5(dev_priv))
1686 		return ironlake_do_reset;
1687 	else if (IS_G4X(dev_priv))
1688 		return g4x_do_reset;
1689 	else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
1690 		return g33_do_reset;
1691 	else if (INTEL_INFO(dev_priv)->gen >= 3)
1692 		return i915_do_reset;
1693 	else
1694 		return NULL;
1695 }
1696 
1697 int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1698 {
1699 	reset_func reset;
1700 	int retry;
1701 	int ret;
1702 
1703 	might_sleep();
1704 
1705 	reset = intel_get_gpu_reset(dev_priv);
1706 	if (reset == NULL)
1707 		return -ENODEV;
1708 
1709 	/* If the power well sleeps during the reset, the reset
1710 	 * request may be dropped and never complete (causing -EIO).
1711 	 */
1712 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1713 	for (retry = 0; retry < 3; retry++) {
1714 		ret = reset(dev_priv, engine_mask);
1715 		if (ret != -ETIMEDOUT)
1716 			break;
1717 
1718 		cond_resched();
1719 	}
1720 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1721 
1722 	return ret;
1723 }
1724 
1725 bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
1726 {
1727 	return intel_get_gpu_reset(dev_priv) != NULL;
1728 }
1729 
1730 /*
1731  * When GuC submission is enabled, GuC manages ELSP and can initiate the
1732  * engine reset too. For now, fall back to a full GPU reset if GuC submission is enabled.
1733  */
1734 bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
1735 {
1736 	return (dev_priv->info.has_reset_engine &&
1737 		!dev_priv->guc.execbuf_client &&
1738 		i915.reset >= 2);
1739 }
1740 
1741 int intel_guc_reset(struct drm_i915_private *dev_priv)
1742 {
1743 	int ret;
1744 
1745 	if (!HAS_GUC(dev_priv))
1746 		return -EINVAL;
1747 
1748 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1749 	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
1750 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1751 
1752 	return ret;
1753 }
1754 
1755 bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
1756 {
1757 	return check_for_unclaimed_mmio(dev_priv);
1758 }
1759 
1760 bool
1761 intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
1762 {
1763 	if (unlikely(i915.mmio_debug ||
1764 		     dev_priv->uncore.unclaimed_mmio_check <= 0))
1765 		return false;
1766 
1767 	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
1768 		DRM_DEBUG("Unclaimed register detected, "
1769 			  "enabling oneshot unclaimed register reporting. "
1770 			  "Please use i915.mmio_debug=N for more information.\n");
1771 		i915.mmio_debug++;
1772 		dev_priv->uncore.unclaimed_mmio_check--;
1773 		return true;
1774 	}
1775 
1776 	return false;
1777 }
1778 
1779 static enum forcewake_domains
1780 intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
1781 				i915_reg_t reg)
1782 {
1783 	u32 offset = i915_mmio_reg_offset(reg);
1784 	enum forcewake_domains fw_domains;
1785 
1786 	if (HAS_FWTABLE(dev_priv)) {
1787 		fw_domains = __fwtable_reg_read_fw_domains(offset);
1788 	} else if (INTEL_GEN(dev_priv) >= 6) {
1789 		fw_domains = __gen6_reg_read_fw_domains(offset);
1790 	} else {
1791 		WARN_ON(!IS_GEN(dev_priv, 2, 5));
1792 		fw_domains = 0;
1793 	}
1794 
1795 	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
1796 
1797 	return fw_domains;
1798 }
1799 
1800 static enum forcewake_domains
1801 intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
1802 				 i915_reg_t reg)
1803 {
1804 	u32 offset = i915_mmio_reg_offset(reg);
1805 	enum forcewake_domains fw_domains;
1806 
1807 	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
1808 		fw_domains = __fwtable_reg_write_fw_domains(offset);
1809 	} else if (IS_GEN8(dev_priv)) {
1810 		fw_domains = __gen8_reg_write_fw_domains(offset);
1811 	} else if (IS_GEN(dev_priv, 6, 7)) {
1812 		fw_domains = FORCEWAKE_RENDER;
1813 	} else {
1814 		WARN_ON(!IS_GEN(dev_priv, 2, 5));
1815 		fw_domains = 0;
1816 	}
1817 
1818 	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
1819 
1820 	return fw_domains;
1821 }
1822 
1823 /**
1824  * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
1825  * 				    a register
1826  * @dev_priv: pointer to struct drm_i915_private
1827  * @reg: register in question
1828  * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
1829  *
1830  * Returns a set of forcewake domains required to be taken with, for example,
1831  * intel_uncore_forcewake_get() for the specified register to be accessible in the
1832  * specified mode (read, write or read/write) with raw mmio accessors.
1833  *
1834  * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
1835  * the caller to do FIFO management on their own or risk losing writes.
1836  */
1837 enum forcewake_domains
1838 intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
1839 			       i915_reg_t reg, unsigned int op)
1840 {
1841 	enum forcewake_domains fw_domains = 0;
1842 
1843 	WARN_ON(!op);
1844 
1845 	if (intel_vgpu_active(dev_priv))
1846 		return 0;
1847 
1848 	if (op & FW_REG_READ)
1849 		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
1850 
1851 	if (op & FW_REG_WRITE)
1852 		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
1853 
1854 	return fw_domains;
1855 }
1856 
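/*
 * Illustrative sketch only (EXAMPLE_REG is a made-up register name): the
 * intended pattern is to look up the required domains once, hold them across
 * a batch of raw accesses, and then release them:
 *
 *	enum forcewake_domains fw =
 *		intel_uncore_forcewake_for_reg(dev_priv, EXAMPLE_REG,
 *					       FW_REG_READ | FW_REG_WRITE);
 *
 *	intel_uncore_forcewake_get(dev_priv, fw);
 *	I915_WRITE_FW(EXAMPLE_REG, I915_READ_FW(EXAMPLE_REG) | BIT(0));
 *	intel_uncore_forcewake_put(dev_priv, fw);
 *
 * On Gen6/Gen7 writes the caller also owns GT FIFO management, per the NOTE
 * above.
 */
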
1857 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1858 #include "selftests/mock_uncore.c"
1859 #include "selftests/intel_uncore.c"
1860 #endif
1861