1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include "i915_drv.h"
26 #include "intel_ringbuffer.h"
27 #include "intel_lrc.h"
28 
29 /* Haswell does have the CXT_SIZE register, but it does not appear to be
30  * valid. The docs explain, in dwords, what is in the context object. The
31  * full size is 70720 bytes; however, the power context and execlist context
32  * will never be saved (the power context is stored elsewhere, and execlists
33  * don't work on HSW), so the final size, including the extra state required
34  * for the Resource Streamer, is 66944 bytes, which rounds up to 17 pages.
35  */
36 #define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
37 /* Same as Haswell, but 72064 bytes now. */
38 #define GEN8_CXT_TOTAL_SIZE		(18 * PAGE_SIZE)
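/*
 * Editorial worked example (not from the original source, assuming 4 KiB
 * pages): DIV_ROUND_UP(66944, 4096) == 17 and DIV_ROUND_UP(72064, 4096) == 18,
 * matching the page counts used in the two definitions above.
 */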
39 
40 #define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
41 #define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
42 
43 #define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)
44 
45 struct engine_class_info {
46 	const char *name;
47 	int (*init_legacy)(struct intel_engine_cs *engine);
48 	int (*init_execlists)(struct intel_engine_cs *engine);
49 };
50 
51 static const struct engine_class_info intel_engine_classes[] = {
52 	[RENDER_CLASS] = {
53 		.name = "rcs",
54 		.init_execlists = logical_render_ring_init,
55 		.init_legacy = intel_init_render_ring_buffer,
56 	},
57 	[COPY_ENGINE_CLASS] = {
58 		.name = "bcs",
59 		.init_execlists = logical_xcs_ring_init,
60 		.init_legacy = intel_init_blt_ring_buffer,
61 	},
62 	[VIDEO_DECODE_CLASS] = {
63 		.name = "vcs",
64 		.init_execlists = logical_xcs_ring_init,
65 		.init_legacy = intel_init_bsd_ring_buffer,
66 	},
67 	[VIDEO_ENHANCEMENT_CLASS] = {
68 		.name = "vecs",
69 		.init_execlists = logical_xcs_ring_init,
70 		.init_legacy = intel_init_vebox_ring_buffer,
71 	},
72 };
73 
74 struct engine_info {
75 	unsigned int hw_id;
76 	unsigned int uabi_id;
77 	u8 class;
78 	u8 instance;
79 	u32 mmio_base;
80 	unsigned irq_shift;
81 };
82 
83 static const struct engine_info intel_engines[] = {
84 	[RCS] = {
85 		.hw_id = RCS_HW,
86 		.uabi_id = I915_EXEC_RENDER,
87 		.class = RENDER_CLASS,
88 		.instance = 0,
89 		.mmio_base = RENDER_RING_BASE,
90 		.irq_shift = GEN8_RCS_IRQ_SHIFT,
91 	},
92 	[BCS] = {
93 		.hw_id = BCS_HW,
94 		.uabi_id = I915_EXEC_BLT,
95 		.class = COPY_ENGINE_CLASS,
96 		.instance = 0,
97 		.mmio_base = BLT_RING_BASE,
98 		.irq_shift = GEN8_BCS_IRQ_SHIFT,
99 	},
100 	[VCS] = {
101 		.hw_id = VCS_HW,
102 		.uabi_id = I915_EXEC_BSD,
103 		.class = VIDEO_DECODE_CLASS,
104 		.instance = 0,
105 		.mmio_base = GEN6_BSD_RING_BASE,
106 		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
107 	},
108 	[VCS2] = {
109 		.hw_id = VCS2_HW,
110 		.uabi_id = I915_EXEC_BSD,
111 		.class = VIDEO_DECODE_CLASS,
112 		.instance = 1,
113 		.mmio_base = GEN8_BSD2_RING_BASE,
114 		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
115 	},
116 	[VECS] = {
117 		.hw_id = VECS_HW,
118 		.uabi_id = I915_EXEC_VEBOX,
119 		.class = VIDEO_ENHANCEMENT_CLASS,
120 		.instance = 0,
121 		.mmio_base = VEBOX_RING_BASE,
122 		.irq_shift = GEN8_VECS_IRQ_SHIFT,
123 	},
124 };
125 
126 /**
127  * __intel_engine_context_size() - return the size of the context for an engine
128  * @dev_priv: i915 device private
129  * @class: engine class
130  *
131  * Each engine class may require a different amount of space for a context
132  * image.
133  *
134  * Return: size (in bytes) of an engine class specific context image
135  *
136  * Note: this size includes the HWSP, which is part of the context image
137  * in LRC mode, but does not include the "shared data page" used with
138  * GuC submission. The caller should account for this if using the GuC.
139  */
140 static u32
141 __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
142 {
143 	u32 cxt_size;
144 
145 	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
146 
147 	switch (class) {
148 	case RENDER_CLASS:
149 		switch (INTEL_GEN(dev_priv)) {
150 		default:
151 			MISSING_CASE(INTEL_GEN(dev_priv));
152 		case 10:
153 		case 9:
154 			return GEN9_LR_CONTEXT_RENDER_SIZE;
155 		case 8:
156 			return i915.enable_execlists ?
157 			       GEN8_LR_CONTEXT_RENDER_SIZE :
158 			       GEN8_CXT_TOTAL_SIZE;
159 		case 7:
160 			if (IS_HASWELL(dev_priv))
161 				return HSW_CXT_TOTAL_SIZE;
162 
163 			cxt_size = I915_READ(GEN7_CXT_SIZE);
164 			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
165 					PAGE_SIZE);
166 		case 6:
167 			cxt_size = I915_READ(CXT_SIZE);
168 			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
169 					PAGE_SIZE);
170 		case 5:
171 		case 4:
172 		case 3:
173 		case 2:
174 		/* For the special day when i810 gets merged. */
175 		case 1:
176 			return 0;
177 		}
178 		break;
179 	default:
180 		MISSING_CASE(class);
181 	case VIDEO_DECODE_CLASS:
182 	case VIDEO_ENHANCEMENT_CLASS:
183 	case COPY_ENGINE_CLASS:
184 		if (INTEL_GEN(dev_priv) < 8)
185 			return 0;
186 		return GEN8_LR_CONTEXT_OTHER_SIZE;
187 	}
188 }
189 
190 static int
191 intel_engine_setup(struct drm_i915_private *dev_priv,
192 		   enum intel_engine_id id)
193 {
194 	const struct engine_info *info = &intel_engines[id];
195 	const struct engine_class_info *class_info;
196 	struct intel_engine_cs *engine;
197 
198 	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
199 	class_info = &intel_engine_classes[info->class];
200 
201 	GEM_BUG_ON(dev_priv->engine[id]);
202 	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
203 	if (!engine)
204 		return -ENOMEM;
205 
206 	engine->id = id;
207 	engine->i915 = dev_priv;
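	/*
	 * Engine names are "<class><instance>", e.g. "rcs0" or "vcs1"
	 * (editorial note); warn if that ever overflows the fixed-size buffer.
	 */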
208 	WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u",
209 			 class_info->name, info->instance) >=
210 		sizeof(engine->name));
211 	engine->uabi_id = info->uabi_id;
212 	engine->hw_id = engine->guc_id = info->hw_id;
213 	engine->mmio_base = info->mmio_base;
214 	engine->irq_shift = info->irq_shift;
215 	engine->class = info->class;
216 	engine->instance = info->instance;
217 
218 	engine->context_size = __intel_engine_context_size(dev_priv,
219 							   engine->class);
220 	if (WARN_ON(engine->context_size > BIT(20)))
221 		engine->context_size = 0;
222 
223 	/* Nothing to do here, execute in order of dependencies */
224 	engine->schedule = NULL;
225 
226 	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
227 
228 	dev_priv->engine[id] = engine;
229 	return 0;
230 }
231 
232 /**
233  * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
234  * @dev_priv: i915 device private
235  *
236  * Return: non-zero if the initialization failed.
237  */
238 int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
239 {
240 	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
241 	const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
242 	struct intel_engine_cs *engine;
243 	enum intel_engine_id id;
244 	unsigned int mask = 0;
245 	unsigned int i;
246 	int err;
247 
248 	WARN_ON(ring_mask == 0);
249 	WARN_ON(ring_mask &
250 		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
251 
252 	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
253 		if (!HAS_ENGINE(dev_priv, i))
254 			continue;
255 
256 		err = intel_engine_setup(dev_priv, i);
257 		if (err)
258 			goto cleanup;
259 
260 		mask |= ENGINE_MASK(i);
261 	}
262 
263 	/*
264 	 * Catch failures to update the intel_engines table when new engines
265 	 * are added to the driver: warn about it and disable the forgotten
266 	 * engines.
267 	 */
268 	if (WARN_ON(mask != ring_mask))
269 		device_info->ring_mask = mask;
270 
271 	/* We always presume we have at least RCS available for later probing */
272 	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
273 		err = -ENODEV;
274 		goto cleanup;
275 	}
276 
277 	device_info->num_rings = hweight32(mask);
278 
279 	return 0;
280 
281 cleanup:
282 	for_each_engine(engine, dev_priv, id)
283 		kfree(engine);
284 	return err;
285 }
286 
287 /**
288  * intel_engines_init() - init the Engine Command Streamers
289  * @dev_priv: i915 device private
290  *
291  * Return: non-zero if the initialization failed.
292  */
293 int intel_engines_init(struct drm_i915_private *dev_priv)
294 {
295 	struct intel_engine_cs *engine;
296 	enum intel_engine_id id, err_id;
297 	int err;
298 
299 	for_each_engine(engine, dev_priv, id) {
300 		const struct engine_class_info *class_info =
301 			&intel_engine_classes[engine->class];
302 		int (*init)(struct intel_engine_cs *engine);
303 
304 		if (i915.enable_execlists)
305 			init = class_info->init_execlists;
306 		else
307 			init = class_info->init_legacy;
308 
309 		err = -EINVAL;
310 		err_id = id;
311 
312 		if (GEM_WARN_ON(!init))
313 			goto cleanup;
314 
315 		err = init(engine);
316 		if (err)
317 			goto cleanup;
318 
319 		GEM_BUG_ON(!engine->submit_request);
320 	}
321 
322 	return 0;
323 
324 cleanup:
325 	for_each_engine(engine, dev_priv, id) {
326 		if (id >= err_id) {
327 			kfree(engine);
328 			dev_priv->engine[id] = NULL;
329 		} else {
330 			dev_priv->gt.cleanup_engine(engine);
331 		}
332 	}
333 	return err;
334 }
335 
336 void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
337 {
338 	struct drm_i915_private *dev_priv = engine->i915;
339 
340 	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
341 	 * so long as the semaphore value in the register/page is greater
342 	 * than the sync value), so whenever we reset the seqno we must also
343 	 * reset the tracking semaphore value to 0 so that it is always
344 	 * before the next request's seqno. If we don't reset the semaphore
345 	 * value, then when the seqno moves backwards all future waits will
346 	 * complete instantly (causing rendering corruption).
347 	 */
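	/*
	 * Illustration (editorial note, not from bspec): a waiter on sync
	 * value T completes once the semaphore value in the register/page is
	 * greater than T, so a stale value of e.g. 0x1000 left behind across
	 * a seqno reset would make all waits with sync values below 0x1000
	 * appear already signalled.
	 */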
348 	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
349 		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
350 		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
351 		if (HAS_VEBOX(dev_priv))
352 			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
353 	}
354 	if (dev_priv->semaphore) {
355 		struct page *page = i915_vma_first_page(dev_priv->semaphore);
356 		void *semaphores;
357 
358 		/* Semaphores are in noncoherent memory, flush to be safe */
359 		semaphores = kmap_atomic(page);
360 		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
361 		       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
362 		drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
363 				       I915_NUM_ENGINES * gen8_semaphore_seqno_size);
364 		kunmap_atomic(semaphores);
365 	}
366 
367 	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
368 	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
369 
370 	/* After manually advancing the seqno, fake the interrupt in case
371 	 * there are any waiters for that seqno.
372 	 */
373 	intel_engine_wakeup(engine);
374 
375 	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
376 }
377 
378 static void intel_engine_init_timeline(struct intel_engine_cs *engine)
379 {
380 	engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
381 }
382 
383 /**
384  * intel_engine_setup_common - setup engine state not requiring hw access
385  * @engine: Engine to setup.
386  *
387  * Initializes @engine structure members shared between legacy and execlists
388  * submission modes which do not require hardware access.
389  *
390  * Typically done early in the submission mode specific engine setup stage.
391  */
392 void intel_engine_setup_common(struct intel_engine_cs *engine)
393 {
394 	engine->execlist_queue = RB_ROOT;
395 	engine->execlist_first = NULL;
396 
397 	intel_engine_init_timeline(engine);
398 	intel_engine_init_hangcheck(engine);
399 	i915_gem_batch_pool_init(engine, &engine->batch_pool);
400 
401 	intel_engine_init_cmd_parser(engine);
402 }
403 
404 int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
405 {
406 	struct drm_i915_gem_object *obj;
407 	struct i915_vma *vma;
408 	int ret;
409 
410 	WARN_ON(engine->scratch);
411 
412 	obj = i915_gem_object_create_stolen(engine->i915, size);
413 	if (!obj)
414 		obj = i915_gem_object_create_internal(engine->i915, size);
415 	if (IS_ERR(obj)) {
416 		DRM_ERROR("Failed to allocate scratch page\n");
417 		return PTR_ERR(obj);
418 	}
419 
420 	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
421 	if (IS_ERR(vma)) {
422 		ret = PTR_ERR(vma);
423 		goto err_unref;
424 	}
425 
426 	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
427 	if (ret)
428 		goto err_unref;
429 
430 	engine->scratch = vma;
431 	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
432 			 engine->name, i915_ggtt_offset(vma));
433 	return 0;
434 
435 err_unref:
436 	i915_gem_object_put(obj);
437 	return ret;
438 }
439 
440 static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
441 {
442 	i915_vma_unpin_and_release(&engine->scratch);
443 }
444 
445 /**
446  * intel_engine_init_common - initialize engine state which might require hw access
447  * @engine: Engine to initialize.
448  *
449  * Initializes @engine structure members shared between legacy and execlists
450  * submission modes which do require hardware access.
451  *
452  * Typically done at later stages of submission mode specific engine setup.
453  *
454  * Returns zero on success or an error code on failure.
455  */
456 int intel_engine_init_common(struct intel_engine_cs *engine)
457 {
458 	struct intel_ring *ring;
459 	int ret;
460 
461 	engine->set_default_submission(engine);
462 
463 	/* We may need to do things with the shrinker which
464 	 * require us to immediately switch back to the default
465 	 * context. This can cause a problem as pinning the
466 	 * default context also requires GTT space which may not
467 	 * be available. To avoid this we always pin the default
468 	 * context.
469 	 */
470 	ring = engine->context_pin(engine, engine->i915->kernel_context);
471 	if (IS_ERR(ring))
472 		return PTR_ERR(ring);
473 
474 	ret = intel_engine_init_breadcrumbs(engine);
475 	if (ret)
476 		goto err_unpin;
477 
478 	ret = i915_gem_render_state_init(engine);
479 	if (ret)
480 		goto err_unpin;
481 
482 	return 0;
483 
484 err_unpin:
485 	engine->context_unpin(engine, engine->i915->kernel_context);
486 	return ret;
487 }
488 
489 /**
490  * intel_engine_cleanup_common - cleans up the engine state created by
491  *                               the common initializers.
492  * @engine: Engine to cleanup.
493  *
494  * This cleans up everything created by the common helpers.
495  */
496 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
497 {
498 	intel_engine_cleanup_scratch(engine);
499 
500 	i915_gem_render_state_fini(engine);
501 	intel_engine_fini_breadcrumbs(engine);
502 	intel_engine_cleanup_cmd_parser(engine);
503 	i915_gem_batch_pool_fini(&engine->batch_pool);
504 
505 	engine->context_unpin(engine, engine->i915->kernel_context);
506 }
507 
508 u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
509 {
510 	struct drm_i915_private *dev_priv = engine->i915;
511 	u64 acthd;
512 
513 	if (INTEL_GEN(dev_priv) >= 8)
514 		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
515 					 RING_ACTHD_UDW(engine->mmio_base));
516 	else if (INTEL_GEN(dev_priv) >= 4)
517 		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
518 	else
519 		acthd = I915_READ(ACTHD);
520 
521 	return acthd;
522 }
523 
524 u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
525 {
526 	struct drm_i915_private *dev_priv = engine->i915;
527 	u64 bbaddr;
528 
529 	if (INTEL_GEN(dev_priv) >= 8)
530 		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
531 					  RING_BBADDR_UDW(engine->mmio_base));
532 	else
533 		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
534 
535 	return bbaddr;
536 }
537 
538 const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
539 {
540 	switch (type) {
541 	case I915_CACHE_NONE: return " uncached";
542 	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
543 	case I915_CACHE_L3_LLC: return " L3+LLC";
544 	case I915_CACHE_WT: return " WT";
545 	default: return "";
546 	}
547 }
548 
549 static inline uint32_t
550 read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
551 		  int subslice, i915_reg_t reg)
552 {
553 	uint32_t mcr;
554 	uint32_t ret;
555 	enum forcewake_domains fw_domains;
556 
557 	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
558 						    FW_REG_READ);
559 	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
560 						     GEN8_MCR_SELECTOR,
561 						     FW_REG_READ | FW_REG_WRITE);
562 
563 	spin_lock_irq(&dev_priv->uncore.lock);
564 	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
565 
566 	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
567 	/*
568 	 * The HW expects the slice and subslice selectors to be reset to 0
569 	 * after reading out the registers.
570 	 */
571 	WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
572 	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
573 	mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
574 	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
575 
576 	ret = I915_READ_FW(reg);
577 
578 	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
579 	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
580 
581 	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
582 	spin_unlock_irq(&dev_priv->uncore.lock);
583 
584 	return ret;
585 }
586 
587 /* NB: please notice the memset */
588 void intel_engine_get_instdone(struct intel_engine_cs *engine,
589 			       struct intel_instdone *instdone)
590 {
591 	struct drm_i915_private *dev_priv = engine->i915;
592 	u32 mmio_base = engine->mmio_base;
593 	int slice;
594 	int subslice;
595 
596 	memset(instdone, 0, sizeof(*instdone));
597 
598 	switch (INTEL_GEN(dev_priv)) {
599 	default:
600 		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
601 
602 		if (engine->id != RCS)
603 			break;
604 
605 		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
606 		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
607 			instdone->sampler[slice][subslice] =
608 				read_subslice_reg(dev_priv, slice, subslice,
609 						  GEN7_SAMPLER_INSTDONE);
610 			instdone->row[slice][subslice] =
611 				read_subslice_reg(dev_priv, slice, subslice,
612 						  GEN7_ROW_INSTDONE);
613 		}
614 		break;
615 	case 7:
616 		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
617 
618 		if (engine->id != RCS)
619 			break;
620 
621 		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
622 		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
623 		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);
624 
625 		break;
626 	case 6:
627 	case 5:
628 	case 4:
629 		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
630 
631 		if (engine->id == RCS)
632 			/* HACK: Using the wrong struct member */
633 			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
634 		break;
635 	case 3:
636 	case 2:
637 		instdone->instdone = I915_READ(GEN2_INSTDONE);
638 		break;
639 	}
640 }
641 
642 static int wa_add(struct drm_i915_private *dev_priv,
643 		  i915_reg_t addr,
644 		  const u32 mask, const u32 val)
645 {
646 	const u32 idx = dev_priv->workarounds.count;
647 
648 	if (WARN_ON(idx >= I915_MAX_WA_REGS))
649 		return -ENOSPC;
650 
651 	dev_priv->workarounds.reg[idx].addr = addr;
652 	dev_priv->workarounds.reg[idx].value = val;
653 	dev_priv->workarounds.reg[idx].mask = mask;
654 
655 	dev_priv->workarounds.count++;
656 
657 	return 0;
658 }
659 
660 #define WA_REG(addr, mask, val) do { \
661 		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
662 		if (r) \
663 			return r; \
664 	} while (0)
665 
666 #define WA_SET_BIT_MASKED(addr, mask) \
667 	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
668 
669 #define WA_CLR_BIT_MASKED(addr, mask) \
670 	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
671 
672 #define WA_SET_FIELD_MASKED(addr, mask, value) \
673 	WA_REG(addr, mask, _MASKED_FIELD(mask, value))
674 
675 #define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
676 #define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
677 
678 #define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
679 
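/*
 * Editorial illustration of the WA_* helpers above, using names that appear
 * later in this file: a call such as
 *
 *	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
 *
 * records { .addr = INSTPM, .mask = INSTPM_FORCE_ORDERING,
 * .value = _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING) } in
 * dev_priv->workarounds via wa_add(), and intel_ring_workarounds_emit()
 * later replays every recorded entry as a single MI_LOAD_REGISTER_IMM(count)
 * followed by (offset, value) pairs.
 */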
680 static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
681 				 i915_reg_t reg)
682 {
683 	struct drm_i915_private *dev_priv = engine->i915;
684 	struct i915_workarounds *wa = &dev_priv->workarounds;
685 	const uint32_t index = wa->hw_whitelist_count[engine->id];
686 
687 	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
688 		return -EINVAL;
689 
690 	WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
691 		 i915_mmio_reg_offset(reg));
692 	wa->hw_whitelist_count[engine->id]++;
693 
694 	return 0;
695 }
696 
697 static int gen8_init_workarounds(struct intel_engine_cs *engine)
698 {
699 	struct drm_i915_private *dev_priv = engine->i915;
700 
701 	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
702 
703 	/* WaDisableAsyncFlipPerfMode:bdw,chv */
704 	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
705 
706 	/* WaDisablePartialInstShootdown:bdw,chv */
707 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
708 			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
709 
710 	/* Use Force Non-Coherent whenever executing a 3D context. This is a
711 	 * workaround for a possible hang in the unlikely event a TLB
712 	 * invalidation occurs during a PSD flush.
713 	 */
714 	/* WaForceEnableNonCoherent:bdw,chv */
715 	/* WaHdcDisableFetchWhenMasked:bdw,chv */
716 	WA_SET_BIT_MASKED(HDC_CHICKEN0,
717 			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
718 			  HDC_FORCE_NON_COHERENT);
719 
720 	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
721 	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
722 	 *  polygons in the same 8x4 pixel/sample area to be processed without
723 	 *  stalling waiting for the earlier ones to write to Hierarchical Z
724 	 *  buffer."
725 	 *
726 	 * This optimization is off by default for BDW and CHV; turn it on.
727 	 */
728 	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
729 
730 	/* Wa4x4STCOptimizationDisable:bdw,chv */
731 	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
732 
733 	/*
734 	 * BSpec recommends 8x4 when MSAA is used,
735 	 * however in practice 16x4 seems fastest.
736 	 *
737 	 * Note that PS/WM thread counts depend on the WIZ hashing
738 	 * disable bit, which we don't touch here, but it's good
739 	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
740 	 */
741 	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
742 			    GEN6_WIZ_HASHING_MASK,
743 			    GEN6_WIZ_HASHING_16x4);
744 
745 	return 0;
746 }
747 
748 static int bdw_init_workarounds(struct intel_engine_cs *engine)
749 {
750 	struct drm_i915_private *dev_priv = engine->i915;
751 	int ret;
752 
753 	ret = gen8_init_workarounds(engine);
754 	if (ret)
755 		return ret;
756 
757 	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
758 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
759 
760 	/* WaDisableDopClockGating:bdw
761 	 *
762 	 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
763 	 * to disable EUTC clock gating.
764 	 */
765 	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
766 			  DOP_CLOCK_GATING_DISABLE);
767 
768 	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
769 			  GEN8_SAMPLER_POWER_BYPASS_DIS);
770 
771 	WA_SET_BIT_MASKED(HDC_CHICKEN0,
772 			  /* WaForceContextSaveRestoreNonCoherent:bdw */
773 			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
774 			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
775 			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
776 
777 	return 0;
778 }
779 
780 static int chv_init_workarounds(struct intel_engine_cs *engine)
781 {
782 	struct drm_i915_private *dev_priv = engine->i915;
783 	int ret;
784 
785 	ret = gen8_init_workarounds(engine);
786 	if (ret)
787 		return ret;
788 
789 	/* WaDisableThreadStallDopClockGating:chv */
790 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
791 
792 	/* Improve HiZ throughput on CHV. */
793 	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
794 
795 	return 0;
796 }
797 
798 static int gen9_init_workarounds(struct intel_engine_cs *engine)
799 {
800 	struct drm_i915_private *dev_priv = engine->i915;
801 	int ret;
802 
803 	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
804 	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
805 
806 	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
807 	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
808 		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
809 
810 	/* WaDisableKillLogic:bxt,skl,kbl */
811 	if (!IS_COFFEELAKE(dev_priv))
812 		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
813 			   ECOCHK_DIS_TLB);
814 
815 	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
816 	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
817 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
818 			  FLOW_CONTROL_ENABLE |
819 			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
820 
821 	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
822 	if (!IS_COFFEELAKE(dev_priv))
823 		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
824 				  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
825 
826 	/* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
827 	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
828 		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
829 				  GEN9_DG_MIRROR_FIX_ENABLE);
830 
831 	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
832 	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
833 		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
834 				  GEN9_RHWO_OPTIMIZATION_DISABLE);
835 		/*
836 		 * The WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be
837 		 * set, but we do that in the per-ctx batchbuffer as there is an
838 		 * issue with this register not getting restored on ctx restore.
839 		 */
840 	}
841 
842 	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
843 	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
844 	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
845 			  GEN9_ENABLE_YV12_BUGFIX |
846 			  GEN9_ENABLE_GPGPU_PREEMPTION);
847 
848 	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
849 	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
850 	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
851 					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
852 
853 	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
854 	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
855 			  GEN9_CCS_TLB_PREFETCH_ENABLE);
856 
857 	/* WaDisableMaskBasedCammingInRCC:bxt */
858 	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
859 		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
860 				  PIXEL_MASK_CAMMING_DISABLE);
861 
862 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
863 	WA_SET_BIT_MASKED(HDC_CHICKEN0,
864 			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
865 			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
866 
867 	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
868 	 * both tied to WaForceContextSaveRestoreNonCoherent
869 	 * in some hsds for skl. We keep the tie for all gen9. The
870 	 * documentation is a bit hazy and so we want to get common behaviour,
871 	 * even though there is no clear evidence we would need both on kbl/bxt.
872 	 * This area has been a source of system hangs so we play it safe
873 	 * and mimic the skl regardless of what bspec says.
874 	 *
875 	 * Use Force Non-Coherent whenever executing a 3D context. This
876 	 * is a workaround for a possible hang in the unlikely event
877 	 * a TLB invalidation occurs during a PSD flush.
878 	 */
879 
880 	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
881 	WA_SET_BIT_MASKED(HDC_CHICKEN0,
882 			  HDC_FORCE_NON_COHERENT);
883 
884 	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
885 	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
886 		   BDW_DISABLE_HDC_INVALIDATION);
887 
888 	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
889 	if (IS_SKYLAKE(dev_priv) ||
890 	    IS_KABYLAKE(dev_priv) ||
891 	    IS_COFFEELAKE(dev_priv) ||
892 	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
893 		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
894 				  GEN8_SAMPLER_POWER_BYPASS_DIS);
895 
896 	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
897 	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
898 
899 	/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
900 	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
901 				    GEN8_LQSC_FLUSH_COHERENT_LINES));
902 
903 	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
904 	if (IS_GEN9_LP(dev_priv))
905 		WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
906 
907 	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
908 	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
909 	if (ret)
910 		return ret;
911 
912 	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl */
913 	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
914 	if (ret)
915 		return ret;
916 
917 	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
918 	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
919 	if (ret)
920 		return ret;
921 
922 	return 0;
923 }
924 
925 static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
926 {
927 	struct drm_i915_private *dev_priv = engine->i915;
928 	u8 vals[3] = { 0, 0, 0 };
929 	unsigned int i;
930 
931 	for (i = 0; i < 3; i++) {
932 		u8 ss;
933 
934 		/*
935 		 * Only consider slices where one, and only one, subslice has 7
936 		 * EUs
937 		 */
938 		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
939 			continue;
940 
941 		/*
942 		 * subslice_7eu[i] != 0 (because of the check above) and
943 		 * ss_max == 4 (maximum number of subslices possible per slice)
944 		 *
945 		 * ->    0 <= ss <= 3;
946 		 */
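		/*
		 * Editorial example: subslice_7eu[i] == BIT(2) gives
		 * ss == ffs(BIT(2)) - 1 == 2, hence vals[i] == 1 below.
		 */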
947 		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
948 		vals[i] = 3 - ss;
949 	}
950 
951 	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
952 		return 0;
953 
954 	/* Tune IZ hashing. See intel_device_info_runtime_init() */
955 	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
956 			    GEN9_IZ_HASHING_MASK(2) |
957 			    GEN9_IZ_HASHING_MASK(1) |
958 			    GEN9_IZ_HASHING_MASK(0),
959 			    GEN9_IZ_HASHING(2, vals[2]) |
960 			    GEN9_IZ_HASHING(1, vals[1]) |
961 			    GEN9_IZ_HASHING(0, vals[0]));
962 
963 	return 0;
964 }
965 
966 static int skl_init_workarounds(struct intel_engine_cs *engine)
967 {
968 	struct drm_i915_private *dev_priv = engine->i915;
969 	int ret;
970 
971 	ret = gen9_init_workarounds(engine);
972 	if (ret)
973 		return ret;
974 
975 	/*
976 	 * Actual WA is to disable percontext preemption granularity control
977 	 * until D0 which is the default case so this is equivalent to
978 	 * !WaDisablePerCtxtPreemptionGranularityControl:skl
979 	 */
980 	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
981 		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
982 
983 	/* WaEnableGapsTsvCreditFix:skl */
984 	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
985 				   GEN9_GAPS_TSV_CREDIT_DISABLE));
986 
987 	/* WaDisableGafsUnitClkGating:skl */
988 	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
989 
990 	/* WaInPlaceDecompressionHang:skl */
991 	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
992 		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
993 			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
994 
995 	/* WaDisableLSQCROPERFforOCL:skl */
996 	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
997 	if (ret)
998 		return ret;
999 
1000 	return skl_tune_iz_hashing(engine);
1001 }
1002 
1003 static int bxt_init_workarounds(struct intel_engine_cs *engine)
1004 {
1005 	struct drm_i915_private *dev_priv = engine->i915;
1006 	int ret;
1007 
1008 	ret = gen9_init_workarounds(engine);
1009 	if (ret)
1010 		return ret;
1011 
1012 	/* WaStoreMultiplePTEenable:bxt */
1013 	/* This is a requirement according to Hardware specification */
1014 	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
1015 		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
1016 
1017 	/* WaSetClckGatingDisableMedia:bxt */
1018 	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
1019 		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1020 					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
1021 	}
1022 
1023 	/* WaDisableThreadStallDopClockGating:bxt */
1024 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
1025 			  STALL_DOP_GATING_DISABLE);
1026 
1027 	/* WaDisablePooledEuLoadBalancingFix:bxt */
1028 	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
1029 		WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
1030 				  GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
1031 	}
1032 
1033 	/* WaDisableSbeCacheDispatchPortSharing:bxt */
1034 	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
1035 		WA_SET_BIT_MASKED(
1036 			GEN7_HALF_SLICE_CHICKEN1,
1037 			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1038 	}
1039 
1040 	/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
1041 	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
1042 	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
1043 	/* WaDisableLSQCROPERFforOCL:bxt */
1044 	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
1045 		ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
1046 		if (ret)
1047 			return ret;
1048 
1049 		ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1050 		if (ret)
1051 			return ret;
1052 	}
1053 
1054 	/* WaProgramL3SqcReg1DefaultForPerf:bxt */
1055 	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
1056 		u32 val = I915_READ(GEN8_L3SQCREG1);
1057 		val &= ~L3_PRIO_CREDITS_MASK;
1058 		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
1059 		I915_WRITE(GEN8_L3SQCREG1, val);
1060 	}
1061 
1062 	/* WaToEnableHwFixForPushConstHWBug:bxt */
1063 	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
1064 		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1065 				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1066 
1067 	/* WaInPlaceDecompressionHang:bxt */
1068 	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
1069 		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1070 			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1071 
1072 	return 0;
1073 }
1074 
1075 static int kbl_init_workarounds(struct intel_engine_cs *engine)
1076 {
1077 	struct drm_i915_private *dev_priv = engine->i915;
1078 	int ret;
1079 
1080 	ret = gen9_init_workarounds(engine);
1081 	if (ret)
1082 		return ret;
1083 
1084 	/* WaEnableGapsTsvCreditFix:kbl */
1085 	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1086 				   GEN9_GAPS_TSV_CREDIT_DISABLE));
1087 
1088 	/* WaDisableDynamicCreditSharing:kbl */
1089 	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
1090 		WA_SET_BIT(GAMT_CHKN_BIT_REG,
1091 			   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
1092 
1093 	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
1094 	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
1095 		WA_SET_BIT_MASKED(HDC_CHICKEN0,
1096 				  HDC_FENCE_DEST_SLM_DISABLE);
1097 
1098 	/* WaToEnableHwFixForPushConstHWBug:kbl */
1099 	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
1100 		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1101 				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1102 
1103 	/* WaDisableGafsUnitClkGating:kbl */
1104 	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1105 
1106 	/* WaDisableSbeCacheDispatchPortSharing:kbl */
1107 	WA_SET_BIT_MASKED(
1108 		GEN7_HALF_SLICE_CHICKEN1,
1109 		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1110 
1111 	/* WaInPlaceDecompressionHang:kbl */
1112 	WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1113 		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1114 
1115 	/* WaDisableLSQCROPERFforOCL:kbl */
1116 	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1117 	if (ret)
1118 		return ret;
1119 
1120 	return 0;
1121 }
1122 
1123 static int glk_init_workarounds(struct intel_engine_cs *engine)
1124 {
1125 	struct drm_i915_private *dev_priv = engine->i915;
1126 	int ret;
1127 
1128 	ret = gen9_init_workarounds(engine);
1129 	if (ret)
1130 		return ret;
1131 
1132 	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
1133 	ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
1134 	if (ret)
1135 		return ret;
1136 
1137 	/* WaToEnableHwFixForPushConstHWBug:glk */
1138 	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1139 			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1140 
1141 	return 0;
1142 }
1143 
1144 static int cfl_init_workarounds(struct intel_engine_cs *engine)
1145 {
1146 	struct drm_i915_private *dev_priv = engine->i915;
1147 	int ret;
1148 
1149 	ret = gen9_init_workarounds(engine);
1150 	if (ret)
1151 		return ret;
1152 
1153 	/* WaEnableGapsTsvCreditFix:cfl */
1154 	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1155 				   GEN9_GAPS_TSV_CREDIT_DISABLE));
1156 
1157 	/* WaToEnableHwFixForPushConstHWBug:cfl */
1158 	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1159 			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1160 
1161 	/* WaDisableGafsUnitClkGating:cfl */
1162 	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1163 
1164 	/* WaDisableSbeCacheDispatchPortSharing:cfl */
1165 	WA_SET_BIT_MASKED(
1166 		GEN7_HALF_SLICE_CHICKEN1,
1167 		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1168 
1169 	/* WaInPlaceDecompressionHang:cfl */
1170 	WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1171 		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1172 
1173 	return 0;
1174 }
1175 
1176 int init_workarounds_ring(struct intel_engine_cs *engine)
1177 {
1178 	struct drm_i915_private *dev_priv = engine->i915;
1179 	int err;
1180 
1181 	WARN_ON(engine->id != RCS);
1182 
1183 	dev_priv->workarounds.count = 0;
1184 	dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;
1185 
1186 	if (IS_BROADWELL(dev_priv))
1187 		err = bdw_init_workarounds(engine);
1188 	else if (IS_CHERRYVIEW(dev_priv))
1189 		err = chv_init_workarounds(engine);
1190 	else if (IS_SKYLAKE(dev_priv))
1191 		err = skl_init_workarounds(engine);
1192 	else if (IS_BROXTON(dev_priv))
1193 		err = bxt_init_workarounds(engine);
1194 	else if (IS_KABYLAKE(dev_priv))
1195 		err = kbl_init_workarounds(engine);
1196 	else if (IS_GEMINILAKE(dev_priv))
1197 		err = glk_init_workarounds(engine);
1198 	else if (IS_COFFEELAKE(dev_priv))
1199 		err = cfl_init_workarounds(engine);
1200 	else
1201 		err = 0;
1202 	if (err)
1203 		return err;
1204 
1205 	DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
1206 			 engine->name, dev_priv->workarounds.count);
1207 	return 0;
1208 }
1209 
1210 int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
1211 {
1212 	struct i915_workarounds *w = &req->i915->workarounds;
1213 	u32 *cs;
1214 	int ret, i;
1215 
1216 	if (w->count == 0)
1217 		return 0;
1218 
1219 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
1220 	if (ret)
1221 		return ret;
1222 
1223 	cs = intel_ring_begin(req, (w->count * 2 + 2));
1224 	if (IS_ERR(cs))
1225 		return PTR_ERR(cs);
1226 
1227 	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
1228 	for (i = 0; i < w->count; i++) {
1229 		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
1230 		*cs++ = w->reg[i].value;
1231 	}
1232 	*cs++ = MI_NOOP;
1233 
1234 	intel_ring_advance(req, cs);
1235 
1236 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
1237 	if (ret)
1238 		return ret;
1239 
1240 	return 0;
1241 }
1242 
1243 static bool ring_is_idle(struct intel_engine_cs *engine)
1244 {
1245 	struct drm_i915_private *dev_priv = engine->i915;
1246 	bool idle = true;
1247 
1248 	intel_runtime_pm_get(dev_priv);
1249 
1250 	/* First check that no commands are left in the ring */
1251 	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
1252 	    (I915_READ_TAIL(engine) & TAIL_ADDR))
1253 		idle = false;
1254 
1255 	/* No bit for gen2, so assume the CS parser is idle */
1256 	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
1257 		idle = false;
1258 
1259 	intel_runtime_pm_put(dev_priv);
1260 
1261 	return idle;
1262 }
1263 
1264 /**
1265  * intel_engine_is_idle() - Report if the engine has finished processing all work
1266  * @engine: the intel_engine_cs
1267  *
1268  * Return true if there are no requests pending, nothing is left to be
1269  * submitted to hardware, and the engine is idle.
1270  */
1271 bool intel_engine_is_idle(struct intel_engine_cs *engine)
1272 {
1273 	struct drm_i915_private *dev_priv = engine->i915;
1274 
1275 	/* More white lies: if wedged, the hw state is inconsistent */
1276 	if (i915_terminally_wedged(&dev_priv->gpu_error))
1277 		return true;
1278 
1279 	/* Any inflight/incomplete requests? */
1280 	if (!i915_seqno_passed(intel_engine_get_seqno(engine),
1281 			       intel_engine_last_submit(engine)))
1282 		return false;
1283 
1284 	if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
1285 		return true;
1286 
1287 	/* Interrupt/tasklet pending? */
1288 	if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
1289 		return false;
1290 
1291 	/* Both ports drained, no more ELSP submission? */
1292 	if (port_request(&engine->execlist_port[0]))
1293 		return false;
1294 
1295 	/* ELSP is empty, but there are ready requests? */
1296 	if (READ_ONCE(engine->execlist_first))
1297 		return false;
1298 
1299 	/* Ring stopped? */
1300 	if (!ring_is_idle(engine))
1301 		return false;
1302 
1303 	return true;
1304 }
1305 
1306 bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
1307 {
1308 	struct intel_engine_cs *engine;
1309 	enum intel_engine_id id;
1310 
1311 	if (READ_ONCE(dev_priv->gt.active_requests))
1312 		return false;
1313 
1314 	/* If the driver is wedged, HW state may be very inconsistent and
1315 	 * report that it is still busy, even though we have stopped using it.
1316 	 */
1317 	if (i915_terminally_wedged(&dev_priv->gpu_error))
1318 		return true;
1319 
1320 	for_each_engine(engine, dev_priv, id) {
1321 		if (!intel_engine_is_idle(engine))
1322 			return false;
1323 	}
1324 
1325 	return true;
1326 }
1327 
1328 void intel_engines_reset_default_submission(struct drm_i915_private *i915)
1329 {
1330 	struct intel_engine_cs *engine;
1331 	enum intel_engine_id id;
1332 
1333 	for_each_engine(engine, i915, id)
1334 		engine->set_default_submission(engine);
1335 }
1336 
1337 void intel_engines_mark_idle(struct drm_i915_private *i915)
1338 {
1339 	struct intel_engine_cs *engine;
1340 	enum intel_engine_id id;
1341 
1342 	for_each_engine(engine, i915, id) {
1343 		intel_engine_disarm_breadcrumbs(engine);
1344 		i915_gem_batch_pool_fini(&engine->batch_pool);
1345 		tasklet_kill(&engine->irq_tasklet);
1346 		engine->no_priolist = false;
1347 	}
1348 }
1349 
1350 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1351 #include "selftests/mock_engine.c"
1352 #endif
1353