/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/module.h>
#include <linux/pm_runtime.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "display/intel_atomic.h"
#include "display/intel_bw.h"
#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"

#include "gt/intel_llc.h"

#include "i915_drv.h"
#include "i915_fixed.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"

/* Stores plane specific WM parameters */
struct skl_wm_params {
	bool x_tiled, y_tiled;
	bool rc_surface;
	bool is_planar;
	u32 width;
	u8 cpp;
	u32 plane_pixel_rate;
	u32 y_min_scanlines;
	u32 plane_bytes_per_line;
	uint_fixed_16_16_t plane_blocks_per_line;
	uint_fixed_16_16_t y_tile_minimum;
	u32 linetime_us;
	u32 dbuf_block_size;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (HAS_LLC(dev_priv)) {
		/*
		 * WaCompressedResourceDisplayNewHashMode:skl,kbl
		 * Display WA #0390: skl,kbl
		 *
		 * Must match Sampler, Pixel Back End, and Media. See
		 * WaCompressedResourceSamplerPbeMediaNewHashMode.
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) |
			   SKL_DE_COMPRESSED_HASH_MODE);
	}

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/*
	 * WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl
	 * Display WA #0859: skl,bxt,kbl,glk,cfl
	 */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_MEMORY_WAKE);
}

static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/*
	 * Lower the display internal timeout.
	 * This is needed to avoid any hard hangs when the DSI port PLL
	 * is off and an MMIO access is attempted by any privileged
	 * application, using batch buffers or any other means.
	 */
	I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));

	/*
	 * WaFbcTurnOffFbcWatermark:bxt
	 * Display WA #0562: bxt
	 */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS);

	/*
	 * WaFbcHighMemBwCorruptionAvoidance:bxt
	 * Display WA #0883: bxt
	 */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}

static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/*
	 * WaDisablePWMClockGating:glk
	 * Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
	csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
			ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
			csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							 bool is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	vlv_punit_get(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		drm_err(&dev_priv->drm,
			"timed out waiting for Punit DDR DVFS request\n");

	vlv_punit_put(dev_priv);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	vlv_punit_get(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);

	vlv_punit_put(dev_priv);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool was_enabled;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = I915_READ(DSPFW3);
		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
		if (enable)
			val |= PINEVIEW_SELF_REFRESH_EN;
		else
			val &= ~PINEVIEW_SELF_REFRESH_EN;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return false;
	}

	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

	drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
		    enableddisabled(enable),
		    enableddisabled(was_enabled));

	return was_enabled;
}

/**
 * intel_set_memory_cxsr - Configure CxSR state
 * @dev_priv: i915 device
 * @enable: Allow vs. disallow CxSR
 *
 * Allow or disallow the system to enter a special CxSR
 * (C-state self refresh) state. What typically happens in CxSR mode
 * is that several display FIFOs may get combined into a single larger
 * FIFO for a particular plane (so called max FIFO mode) to allow the
 * system to defer memory fetches longer, and the memory will enter
 * self refresh.
 *
 * Note that enabling CxSR does not guarantee that the system enters
 * this special mode, nor does it guarantee that the system stays
 * in that mode once entered. So this just allows/disallows the system
 * to autonomously utilize the CxSR mode. Other factors such as core
 * C-states will affect when/if the system actually enters/exits the
 * CxSR mode.
 *
 * Note that on VLV/CHV this actually only controls the max FIFO mode,
 * and the system is free to enter/exit memory self refresh at any time
 * even when the use of CxSR has been disallowed.
 *
 * While the system is actually in the CxSR/max FIFO mode, some plane
 * control registers will not get latched on vblank. Thus in order to
 * guarantee the system will respond to changes in the plane registers
 * we must always disallow CxSR prior to making changes to those registers.
 * Unfortunately the system will re-evaluate the CxSR conditions at
 * frame start which happens after vblank start (which is when the plane
 * registers would get latched), so we can't proceed with the plane update
 * during the same frame where we disallowed CxSR.
 *
 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
 * the hardware w.r.t. HPLL SR when writing to plane registers.
 * Disallowing just CxSR is sufficient.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool ret;

	mutex_lock(&dev_priv->wm.wm_mutex);
	ret = _intel_set_memory_cxsr(dev_priv, enable);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->wm.vlv.cxsr = enable;
	else if (IS_G4X(dev_priv))
		dev_priv->wm.g4x.cxsr = enable;
	mutex_unlock(&dev_priv->wm.wm_mutex);

	return ret;
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
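
/*
 * Illustration (not from the original source): each FIFO split point is a
 * 9-bit value whose low 8 bits live in DSPARB/DSPARB3 and whose 9th bit
 * lives in DSPARB2. E.g. with dsparb = 0x40, dsparb2 = 0x1 and shifts
 * (0, 0): (0x40 & 0xff) | ((0x1 & 0x1) << 8) = 0x140, i.e. the FIFO
 * boundary sits at entry 320.
 */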

static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	enum pipe pipe = crtc->pipe;
	int sprite0_start, sprite1_start;
	u32 dsparb, dsparb2, dsparb3;

	switch (pipe) {
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		MISSING_CASE(pipe);
		return;
	}

	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
	fifo_state->plane[PLANE_CURSOR] = 63;
}

static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (i9xx_plane == PLANE_B)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}

static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (i9xx_plane == PLANE_B)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}

static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pnv_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_wm_method1 - Method 1 / "small buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 1 or "small buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the short term drain rate
 * of the FIFO, ie. it does not account for blanking periods
 * which would effectively reduce the average drain rate across
 * a longer period. The name "small" refers to the fact the
 * FIFO is relatively small compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *   |\   |\
 *   | \  | \
 * __---__---__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method1(unsigned int pixel_rate,
				     unsigned int cpp,
				     unsigned int latency)
{
	u64 ret;

	ret = mul_u32_u32(pixel_rate, cpp * latency);
	ret = DIV_ROUND_UP_ULL(ret, 10000);

	return ret;
}
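
/*
 * Worked example (illustrative, not from the original source): with a
 * 148500 kHz pixel rate, cpp = 4 and a 30us latency (latency = 300 in
 * 0.1us units), method 1 gives
 *   DIV_ROUND_UP(148500 * 4 * 300, 10000) = 17820 bytes,
 * i.e. the FIFO must hold roughly 17.8 KiB to ride out the memory wakeup.
 */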

/**
 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @htotal: Pipe horizontal total
 * @width: Plane width in pixels
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 2 or "large buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the long term drain rate
 * of the FIFO, ie. it does account for blanking periods
 * which effectively reduce the average drain rate across
 * a longer period. The name "large" refers to the fact the
 * FIFO is relatively large compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *    |\___       |\___
 *    |    \___   |    \___
 *    |        \  |        \
 * __ --__--__--__--__--__--__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method2(unsigned int pixel_rate,
				     unsigned int htotal,
				     unsigned int width,
				     unsigned int cpp,
				     unsigned int latency)
{
	unsigned int ret;

	/*
	 * FIXME remove once all users are computing
	 * watermarks in the correct place.
	 */
	if (WARN_ON_ONCE(htotal == 0))
		htotal = 1;

	ret = (latency * pixel_rate) / (htotal * 10000);
	ret = (ret + 1) * width * cpp;

	return ret;
}
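
/*
 * Worked example (illustrative, not from the original source): for a
 * 1920x1080@60 mode (pixel_rate = 148500 kHz, htotal = 2200) with
 * width = 1920, cpp = 4 and a 30us latency (latency = 300 in 0.1us units):
 *   lines elapsed = (300 * 148500) / (2200 * 10000) = 2 (integer division),
 *   watermark = (2 + 1) * 1920 * 4 = 23040 bytes,
 * i.e. method 2 sizes the watermark in whole plane lines.
 */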

/**
 * intel_calculate_wm - calculate watermark level
 * @pixel_rate: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO line-sized chunks from memory until the FIFO fills
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned int intel_calculate_wm(int pixel_rate,
				       const struct intel_watermark_params *wm,
				       int fifo_size, int cpp,
				       unsigned int latency_ns)
{
	int entries, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries = intel_wm_method1(pixel_rate, cpp,
				   latency_ns / 100);
	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
		wm->guard_size;
	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);

	wm_size = fifo_size - entries;
	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
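
/*
 * Worked example (illustrative, not from the original source): take
 * pixel_rate = 148500, cpp = 4, latency_ns = 5000 (the pessimal 5us above)
 * and assume a 64-byte cacheline, fifo_size = 512 and guard_size = 2:
 *   method 1 -> DIV_ROUND_UP(148500 * 4 * 50, 10000) = 2970 bytes,
 *   entries  -> DIV_ROUND_UP(2970, 64) + 2 = 49 cachelines,
 *   wm_size  -> 512 - 49 = 463,
 * so the plane starts refilling once the FIFO drops to 463 cachelines.
 */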

static bool is_disabling(int old, int new, int threshold)
{
	return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
	return old < threshold && new >= threshold;
}

static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
	return dev_priv->wm.max_level + 1;
}

static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
				   const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->hw.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->hw.fb != NULL;
	else
		return plane_state->uapi.visible;
}

static bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->hw.adjusted_mode.crtc_clock;
}

static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pnv_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned int wm;

	latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		drm_dbg_kms(&dev_priv->drm,
			    "Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = fb->format->cpp[0];
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pnv_display_wm,
					pnv_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pnv_cursor_wm,
					pnv_display_wm.fifo_size,
					4, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					4, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

/*
 * Documentation says:
 * "If the line size is small, the TLB fetches can get in the way of the
 *  data fetches, causing some lag in the pixel data return which is not
 *  accounted for in the above formulas. The following adjustment only
 *  needs to be applied if eight whole lines fit in the buffer at once.
 *  The WM is adjusted upwards by the difference between the FIFO size
 *  and the size of 8 whole lines. This adjustment is always performed
 *  in the actual pixel depth regardless of whether FBC is enabled or not."
 */
static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
	int tlb_miss = fifo_size * 64 - width * cpp * 8;

	return max(0, tlb_miss);
}
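
/*
 * Worked example (illustrative, not from the original source): a 511
 * cacheline FIFO holds 511 * 64 = 32704 bytes. For a 640 pixel wide plane
 * at cpp = 2, eight lines need 640 * 2 * 8 = 10240 bytes, so the WM is
 * bumped by 32704 - 10240 = 22464 bytes. For a 1920 pixel wide plane at
 * cpp = 4, eight lines (61440 bytes) no longer fit and no adjustment is
 * applied.
 */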

static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
				const struct g4x_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
		   FW_WM(wm->sr.fbc, FBC_SR) |
		   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
		   FW_WM(wm->sr.cursor, CURSOR_SR) |
		   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
		   FW_WM(wm->hpll.plane, HPLL_SR));

	POSTING_READ(DSPFW1);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
				const struct vlv_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

		I915_WRITE(VLV_DDL(pipe),
			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
			   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
	}

	/*
	 * Zero the (unused) WM1 watermarks, and also clear all the
	 * high order bits so that there are no out of bounds values
	 * present in the registers during the reprogramming.
	 */
	I915_WRITE(DSPHOWM, 0);
	I915_WRITE(DSPHOWM1, 0);
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	}

	POSTING_READ(DSPFW1);
}

#undef FW_WM_VLV

static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;

	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
}

static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
{
	/*
	 * DSPCNTR[13] supposedly controls whether the
	 * primary plane can use the FIFO space otherwise
	 * reserved for the sprite plane. It's not 100% clear
	 * what the actual FIFO size is, but it looks like we
	 * can happily set both primary and sprite watermarks
	 * up to 127 cachelines. So that would seem to mean
	 * that either DSPCNTR[13] doesn't do anything, or that
	 * the total FIFO is >= 256 cachelines in size. Either
	 * way, we don't seem to have to worry about this
	 * repartitioning as the maximum watermark value the
	 * register can hold for each plane is lower than the
	 * minimum FIFO size.
	 */
	switch (plane_id) {
	case PLANE_CURSOR:
		return 63;
	case PLANE_PRIMARY:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
	case PLANE_SPRITE0:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
	default:
		MISSING_CASE(plane_id);
		return 0;
	}
}

static int g4x_fbc_fifo_size(int level)
{
	switch (level) {
	case G4X_WM_LEVEL_SR:
		return 7;
	case G4X_WM_LEVEL_HPLL:
		return 15;
	default:
		MISSING_CASE(level);
		return 0;
	}
}

static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state,
			  int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
	unsigned int clock, htotal, cpp, width, wm;

	if (latency == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	/*
	 * Not 100% sure which way ELK should go here as the
	 * spec only says CL/CTG should assume 32bpp and BW
	 * doesn't need to. But as these things followed the
	 * mobile vs. desktop lines on gen3 as well, let's
	 * assume ELK doesn't need this.
	 *
	 * The spec also fails to list such a restriction for
	 * the HPLL watermark, which seems a little strange.
	 * Let's use 32bpp for the HPLL watermark as well.
	 */
	if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
	    level != G4X_WM_LEVEL_NORMAL)
		cpp = max(cpp, 4u);

	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;

	width = drm_rect_width(&plane_state->uapi.dst);

	if (plane->id == PLANE_CURSOR) {
		wm = intel_wm_method2(clock, htotal, width, cpp, latency);
	} else if (plane->id == PLANE_PRIMARY &&
		   level == G4X_WM_LEVEL_NORMAL) {
		wm = intel_wm_method1(clock, cpp, latency);
	} else {
		unsigned int small, large;

		small = intel_wm_method1(clock, cpp, latency);
		large = intel_wm_method2(clock, htotal, width, cpp, latency);

		wm = min(small, large);
	}

	wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
			      width, cpp);

	wm = DIV_ROUND_UP(wm, 64) + 2;

	return min_t(unsigned int, wm, USHRT_MAX);
}
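
/*
 * Worked example (illustrative, not from the original source): reusing the
 * method 1/2 example numbers above (small = 17820 bytes, large = 23040
 * bytes), a sprite plane takes min(17820, 23040) = 17820; with a zero TLB
 * miss adjustment this becomes DIV_ROUND_UP(17820, 64) + 2 = 281
 * cachelines, where the +2 adds guard headroom on top of the raw value.
 */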

static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	bool dirty = false;

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}

static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
			       int level, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	bool dirty = false;

	/* NORMAL level doesn't have an FBC watermark */
	level = max(level, G4X_WM_LEVEL_SR);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->fbc != value;
		raw->fbc = value;
	}

	return dirty;
}

static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 pri_val);

static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	enum plane_id plane_id = plane->id;
	bool dirty = false;
	int level;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		if (plane_id == PLANE_PRIMARY)
			dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
		int wm, max_wm;

		wm = g4x_compute_wm(crtc_state, plane_state, level);
		max_wm = g4x_plane_fifo_size(plane_id, level);

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;

		if (plane_id != PLANE_PRIMARY ||
		    level == G4X_WM_LEVEL_NORMAL)
			continue;

		wm = ilk_compute_fbc_wm(crtc_state, plane_state,
					raw->plane[plane_id]);
		max_wm = g4x_fbc_fifo_size(level);

		/*
		 * FBC wm is not mandatory as we
		 * can always just disable its use.
		 */
		if (wm > max_wm)
			wm = USHRT_MAX;

		dirty |= raw->fbc != wm;
		raw->fbc = wm;
	}

	/* mark watermarks as invalid */
	dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

	if (plane_id == PLANE_PRIMARY)
		dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

 out:
	if (dirty) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
			    plane->base.name,
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);

		if (plane_id == PLANE_PRIMARY)
			drm_dbg_kms(&dev_priv->drm,
				    "FBC watermarks: SR=%d, HPLL=%d\n",
				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
	}

	return dirty;
}

static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

	return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
}

static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (level > dev_priv->wm.max_level)
		return false;

	return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}

/* mark all levels starting from 'level' as invalid */
static void g4x_invalidate_wms(struct intel_crtc *crtc,
			       struct g4x_wm_state *wm_state, int level)
{
	if (level <= G4X_WM_LEVEL_NORMAL) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm.plane[plane_id] = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_SR) {
		wm_state->cxsr = false;
		wm_state->sr.cursor = USHRT_MAX;
		wm_state->sr.plane = USHRT_MAX;
		wm_state->sr.fbc = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_HPLL) {
		wm_state->hpll_en = false;
		wm_state->hpll.cursor = USHRT_MAX;
		wm_state->hpll.plane = USHRT_MAX;
		wm_state->hpll.fbc = USHRT_MAX;
	}
}

static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
			       int level)
{
	if (level < G4X_WM_LEVEL_SR)
		return false;

	if (level >= G4X_WM_LEVEL_SR &&
	    wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
		return false;

	if (level >= G4X_WM_LEVEL_HPLL &&
	    wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
		return false;

	return true;
}

static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
	int num_active_planes = hweight8(crtc_state->active_planes &
					 ~BIT(PLANE_CURSOR));
	const struct g4x_pipe_wm *raw;
	const struct intel_plane_state *old_plane_state;
	const struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int i, level;
	unsigned int dirty = 0;

	for_each_oldnew_intel_plane_in_state(state, plane,
					     old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->hw.crtc != &crtc->base &&
		    old_plane_state->hw.crtc != &crtc->base)
			continue;

		if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
			dirty |= BIT(plane->id);
	}

	if (!dirty)
		return 0;

	level = G4X_WM_LEVEL_NORMAL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	for_each_plane_id_on_crtc(crtc, plane_id)
		wm_state->wm.plane[plane_id] = raw->plane[plane_id];

	level = G4X_WM_LEVEL_SR;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
	wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
	wm_state->sr.fbc = raw->fbc;

	wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY);

	level = G4X_WM_LEVEL_HPLL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
	wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
	wm_state->hpll.fbc = raw->fbc;

	wm_state->hpll_en = wm_state->cxsr;

	level++;

 out:
	if (level == G4X_WM_LEVEL_NORMAL)
		return -EINVAL;

	/* invalidate the higher levels */
	g4x_invalidate_wms(crtc, wm_state, level);

	/*
	 * Determine if the FBC watermark(s) can be used. If
	 * this isn't the case we prefer to disable the FBC
	 * watermark(s) rather than disable the SR/HPLL
	 * level(s) entirely. 'level-1' is the highest valid
	 * level here.
	 */
	wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1);

	return 0;
}

static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
	const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_crtc_state->uapi.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(intel_state, crtc);
	const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
	enum plane_id plane_id;

	if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
		*intermediate = *optimal;

		intermediate->cxsr = false;
		intermediate->hpll_en = false;
		goto out;
	}

	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!new_crtc_state->disable_cxsr;
	intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
		!new_crtc_state->disable_cxsr;
	intermediate->fbc_en = optimal->fbc_en && active->fbc_en;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		intermediate->wm.plane[plane_id] =
			max(optimal->wm.plane[plane_id],
			    active->wm.plane[plane_id]);

		drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
			    g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
	}

	intermediate->sr.plane = max(optimal->sr.plane,
				     active->sr.plane);
	intermediate->sr.cursor = max(optimal->sr.cursor,
				      active->sr.cursor);
	intermediate->sr.fbc = max(optimal->sr.fbc,
				   active->sr.fbc);

	intermediate->hpll.plane = max(optimal->hpll.plane,
				       active->hpll.plane);
	intermediate->hpll.cursor = max(optimal->hpll.cursor,
					active->hpll.cursor);
	intermediate->hpll.fbc = max(optimal->hpll.fbc,
				     active->hpll.fbc);

	drm_WARN_ON(&dev_priv->drm,
		    (intermediate->sr.plane >
		     g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
		     intermediate->sr.cursor >
		     g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
		    intermediate->cxsr);
	drm_WARN_ON(&dev_priv->drm,
		    (intermediate->sr.plane >
		     g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
		     intermediate->sr.cursor >
		     g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
		    intermediate->hpll_en);

	drm_WARN_ON(&dev_priv->drm,
		    intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
		    intermediate->fbc_en && intermediate->cxsr);
	drm_WARN_ON(&dev_priv->drm,
		    intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
		    intermediate->fbc_en && intermediate->hpll_en);

out:
	/*
	 * If our intermediate WMs are identical to the final WMs, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		new_crtc_state->wm.need_postvbl_update = true;

	return 0;
}

static void g4x_merge_wm(struct drm_i915_private *dev_priv,
			 struct g4x_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_pipes = 0;

	wm->cxsr = true;
	wm->hpll_en = true;
	wm->fbc_en = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;
		if (!wm_state->hpll_en)
			wm->hpll_en = false;
		if (!wm_state->fbc_en)
			wm->fbc_en = false;

		num_active_pipes++;
	}

	if (num_active_pipes != 1) {
		wm->cxsr = false;
		wm->hpll_en = false;
		wm->fbc_en = false;
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm;
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr;
		if (crtc->active && wm->hpll_en)
			wm->hpll = wm_state->hpll;
	}
}
1575 
1576 static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
1577 {
1578 	struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
1579 	struct g4x_wm_values new_wm = {};
1580 
1581 	g4x_merge_wm(dev_priv, &new_wm);
1582 
1583 	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
1584 		return;
1585 
1586 	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
1587 		_intel_set_memory_cxsr(dev_priv, false);
1588 
1589 	g4x_write_wm_values(dev_priv, &new_wm);
1590 
1591 	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
1592 		_intel_set_memory_cxsr(dev_priv, true);
1593 
1594 	*old_wm = new_wm;
1595 }
1596 
1597 static void g4x_initial_watermarks(struct intel_atomic_state *state,
1598 				   struct intel_crtc *crtc)
1599 {
1600 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1601 	const struct intel_crtc_state *crtc_state =
1602 		intel_atomic_get_new_crtc_state(state, crtc);
1603 
1604 	mutex_lock(&dev_priv->wm.wm_mutex);
1605 	crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
1606 	g4x_program_watermarks(dev_priv);
1607 	mutex_unlock(&dev_priv->wm.wm_mutex);
1608 }
1609 
1610 static void g4x_optimize_watermarks(struct intel_atomic_state *state,
1611 				    struct intel_crtc *crtc)
1612 {
1613 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1614 	const struct intel_crtc_state *crtc_state =
1615 		intel_atomic_get_new_crtc_state(state, crtc);
1616 
1617 	if (!crtc_state->wm.need_postvbl_update)
1618 		return;
1619 
1620 	mutex_lock(&dev_priv->wm.wm_mutex);
1621 	crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
1622 	g4x_program_watermarks(dev_priv);
1623 	mutex_unlock(&dev_priv->wm.wm_mutex);
1624 }
1625 
1626 /* latency must be in 0.1us units. */
1627 static unsigned int vlv_wm_method2(unsigned int pixel_rate,
1628 				   unsigned int htotal,
1629 				   unsigned int width,
1630 				   unsigned int cpp,
1631 				   unsigned int latency)
1632 {
1633 	unsigned int ret;
1634 
1635 	ret = intel_wm_method2(pixel_rate, htotal,
1636 			       width, cpp, latency);
1637 	ret = DIV_ROUND_UP(ret, 64);
1638 
1639 	return ret;
1640 }
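/*
 * Worked example for vlv_wm_method2() above (hypothetical values,
 * assuming intel_wm_method2() earlier in this file computes
 * ((latency * pixel_rate) / (htotal * 10000) + 1) * width * cpp):
 *
 *   pixel_rate = 148500 kHz, htotal = 2200, width = 1920,
 *   cpp = 4, latency = 30 (3 usec in 0.1 usec units)
 *
 *   lines = 30 * 148500 / (2200 * 10000) = 0, plus 1 partial line
 *   bytes = 1 * 1920 * 4 = 7680
 *   wm    = DIV_ROUND_UP(7680, 64) = 120 cachelines
 */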
1641 
1642 static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
1643 {
1644 	/* all latencies in usec */
1645 	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
1646 
1647 	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
1648 
1649 	if (IS_CHERRYVIEW(dev_priv)) {
1650 		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
1651 		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
1652 
1653 		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
1654 	}
1655 }
1656 
1657 static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
1658 				const struct intel_plane_state *plane_state,
1659 				int level)
1660 {
1661 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1662 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1663 	const struct drm_display_mode *adjusted_mode =
1664 		&crtc_state->hw.adjusted_mode;
1665 	unsigned int clock, htotal, cpp, width, wm;
1666 
1667 	if (dev_priv->wm.pri_latency[level] == 0)
1668 		return USHRT_MAX;
1669 
1670 	if (!intel_wm_plane_visible(crtc_state, plane_state))
1671 		return 0;
1672 
1673 	cpp = plane_state->hw.fb->format->cpp[0];
1674 	clock = adjusted_mode->crtc_clock;
1675 	htotal = adjusted_mode->crtc_htotal;
1676 	width = crtc_state->pipe_src_w;
1677 
1678 	if (plane->id == PLANE_CURSOR) {
1679 		/*
1680 		 * FIXME the formula gives values that are
1681 		 * too big for the cursor FIFO, and hence we
1682 		 * would never be able to use cursors. For
1683 		 * now just hardcode the watermark.
1684 		 */
1685 		wm = 63;
1686 	} else {
1687 		wm = vlv_wm_method2(clock, htotal, width, cpp,
1688 				    dev_priv->wm.pri_latency[level] * 10);
1689 	}
1690 
1691 	return min_t(unsigned int, wm, USHRT_MAX);
1692 }
1693 
1694 static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
1695 {
1696 	return (active_planes & (BIT(PLANE_SPRITE0) |
1697 				 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
1698 }
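/*
 * The bitmask test above is true only when sprite1 is enabled while
 * sprite0 is not: active_planes is masked down to the two sprite
 * bits and compared against BIT(PLANE_SPRITE1) alone, which rules
 * out every other combination.
 */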
1699 
1700 static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
1701 {
1702 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1703 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1704 	const struct g4x_pipe_wm *raw =
1705 		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
1706 	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
1707 	unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
1708 	int num_active_planes = hweight8(active_planes);
1709 	const int fifo_size = 511;
1710 	int fifo_extra, fifo_left = fifo_size;
1711 	int sprite0_fifo_extra = 0;
1712 	unsigned int total_rate;
1713 	enum plane_id plane_id;
1714 
1715 	/*
1716 	 * When enabling sprite0 after sprite1 has already been enabled
1717 	 * we tend to get an underrun unless sprite0 already has some
1718 	 * FIFO space allocated. Hence we always allocate at least one
1719 	 * cacheline for sprite0 whenever sprite1 is enabled.
1720 	 *
1721 	 * All other plane enable sequences appear immune to this problem.
1722 	 */
1723 	if (vlv_need_sprite0_fifo_workaround(active_planes))
1724 		sprite0_fifo_extra = 1;
1725 
1726 	total_rate = raw->plane[PLANE_PRIMARY] +
1727 		raw->plane[PLANE_SPRITE0] +
1728 		raw->plane[PLANE_SPRITE1] +
1729 		sprite0_fifo_extra;
1730 
1731 	if (total_rate > fifo_size)
1732 		return -EINVAL;
1733 
1734 	if (total_rate == 0)
1735 		total_rate = 1;
1736 
1737 	for_each_plane_id_on_crtc(crtc, plane_id) {
1738 		unsigned int rate;
1739 
1740 		if ((active_planes & BIT(plane_id)) == 0) {
1741 			fifo_state->plane[plane_id] = 0;
1742 			continue;
1743 		}
1744 
1745 		rate = raw->plane[plane_id];
1746 		fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
1747 		fifo_left -= fifo_state->plane[plane_id];
1748 	}
1749 
1750 	fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
1751 	fifo_left -= sprite0_fifo_extra;
1752 
1753 	fifo_state->plane[PLANE_CURSOR] = 63;
1754 
1755 	fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);
1756 
1757 	/* spread the remainder evenly */
1758 	for_each_plane_id_on_crtc(crtc, plane_id) {
1759 		int plane_extra;
1760 
1761 		if (fifo_left == 0)
1762 			break;
1763 
1764 		if ((active_planes & BIT(plane_id)) == 0)
1765 			continue;
1766 
1767 		plane_extra = min(fifo_extra, fifo_left);
1768 		fifo_state->plane[plane_id] += plane_extra;
1769 		fifo_left -= plane_extra;
1770 	}
1771 
1772 	drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
1773 
1774 	/* give it all to the first plane if none are active */
1775 	if (active_planes == 0) {
1776 		drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
1777 		fifo_state->plane[PLANE_PRIMARY] = fifo_left;
1778 	}
1779 
1780 	return 0;
1781 }
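/*
 * Worked allocation example for vlv_compute_fifo() above
 * (hypothetical raw PM2 values):
 *
 *   raw primary = 120, sprite0 = 60, sprite1 = 0 (disabled)
 *   total_rate = 180, fifo_size = 511
 *
 *   primary = 511 * 120 / 180 = 340
 *   sprite0 = 511 *  60 / 180 = 170
 *   fifo_left = 511 - 340 - 170 = 1, spread evenly: primary += 1
 *
 * The cursor keeps its fixed 63 entries on top of this split.
 */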
1782 
1783 /* mark all levels starting from 'level' as invalid */
1784 static void vlv_invalidate_wms(struct intel_crtc *crtc,
1785 			       struct vlv_wm_state *wm_state, int level)
1786 {
1787 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1788 
1789 	for (; level < intel_wm_num_levels(dev_priv); level++) {
1790 		enum plane_id plane_id;
1791 
1792 		for_each_plane_id_on_crtc(crtc, plane_id)
1793 			wm_state->wm[level].plane[plane_id] = USHRT_MAX;
1794 
1795 		wm_state->sr[level].cursor = USHRT_MAX;
1796 		wm_state->sr[level].plane = USHRT_MAX;
1797 	}
1798 }
1799 
1800 static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
1801 {
1802 	if (wm > fifo_size)
1803 		return USHRT_MAX;
1804 	else
1805 		return fifo_size - wm;
1806 }
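/*
 * The value programmed into the hardware is expressed as remaining
 * FIFO space (fifo_size - usage) rather than as usage itself, hence
 * the inversion above; a watermark that doesn't fit in the FIFO is
 * flagged with USHRT_MAX so the whole level gets invalidated.
 */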
1807 
1808 /*
1809  * Set 'level' and all higher levels
1810  * to 'value' in the "raw" watermarks.
1811  */
1812 static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
1813 				 int level, enum plane_id plane_id, u16 value)
1814 {
1815 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1816 	int num_levels = intel_wm_num_levels(dev_priv);
1817 	bool dirty = false;
1818 
1819 	for (; level < num_levels; level++) {
1820 		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1821 
1822 		dirty |= raw->plane[plane_id] != value;
1823 		raw->plane[plane_id] = value;
1824 	}
1825 
1826 	return dirty;
1827 }
1828 
1829 static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
1830 				     const struct intel_plane_state *plane_state)
1831 {
1832 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1833 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1834 	enum plane_id plane_id = plane->id;
1835 	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
1836 	int level;
1837 	bool dirty = false;
1838 
1839 	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
1840 		dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
1841 		goto out;
1842 	}
1843 
1844 	for (level = 0; level < num_levels; level++) {
1845 		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1846 		int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
1847 		int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
1848 
1849 		if (wm > max_wm)
1850 			break;
1851 
1852 		dirty |= raw->plane[plane_id] != wm;
1853 		raw->plane[plane_id] = wm;
1854 	}
1855 
1856 	/* mark all higher levels as invalid */
1857 	dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
1858 
1859 out:
1860 	if (dirty)
1861 		drm_dbg_kms(&dev_priv->drm,
1862 			    "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
1863 			    plane->base.name,
1864 			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
1865 			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
1866 			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
1867 
1868 	return dirty;
1869 }
1870 
1871 static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
1872 				      enum plane_id plane_id, int level)
1873 {
1874 	const struct g4x_pipe_wm *raw =
1875 		&crtc_state->wm.vlv.raw[level];
1876 	const struct vlv_fifo_state *fifo_state =
1877 		&crtc_state->wm.vlv.fifo_state;
1878 
1879 	return raw->plane[plane_id] <= fifo_state->plane[plane_id];
1880 }
1881 
1882 static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
1883 {
1884 	return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
1885 		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
1886 		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
1887 		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
1888 }
1889 
1890 static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
1891 {
1892 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1893 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1894 	struct intel_atomic_state *state =
1895 		to_intel_atomic_state(crtc_state->uapi.state);
1896 	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
1897 	const struct vlv_fifo_state *fifo_state =
1898 		&crtc_state->wm.vlv.fifo_state;
1899 	int num_active_planes = hweight8(crtc_state->active_planes &
1900 					 ~BIT(PLANE_CURSOR));
1901 	bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
1902 	const struct intel_plane_state *old_plane_state;
1903 	const struct intel_plane_state *new_plane_state;
1904 	struct intel_plane *plane;
1905 	enum plane_id plane_id;
1906 	int level, ret, i;
1907 	unsigned int dirty = 0;
1908 
1909 	for_each_oldnew_intel_plane_in_state(state, plane,
1910 					     old_plane_state,
1911 					     new_plane_state, i) {
1912 		if (new_plane_state->hw.crtc != &crtc->base &&
1913 		    old_plane_state->hw.crtc != &crtc->base)
1914 			continue;
1915 
1916 		if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
1917 			dirty |= BIT(plane->id);
1918 	}
1919 
1920 	/*
1921 	 * DSPARB registers may have been reset due to the
1922 	 * power well being turned off. Make sure we restore
1923 	 * them to a consistent state even if no primary/sprite
1924 	 * planes are initially active.
1925 	 */
1926 	if (needs_modeset)
1927 		crtc_state->fifo_changed = true;
1928 
1929 	if (!dirty)
1930 		return 0;
1931 
1932 	/* cursor changes don't warrant a FIFO recompute */
1933 	if (dirty & ~BIT(PLANE_CURSOR)) {
1934 		const struct intel_crtc_state *old_crtc_state =
1935 			intel_atomic_get_old_crtc_state(state, crtc);
1936 		const struct vlv_fifo_state *old_fifo_state =
1937 			&old_crtc_state->wm.vlv.fifo_state;
1938 
1939 		ret = vlv_compute_fifo(crtc_state);
1940 		if (ret)
1941 			return ret;
1942 
1943 		if (needs_modeset ||
1944 		    memcmp(old_fifo_state, fifo_state,
1945 			   sizeof(*fifo_state)) != 0)
1946 			crtc_state->fifo_changed = true;
1947 	}
1948 
1949 	/* initially allow all levels */
1950 	wm_state->num_levels = intel_wm_num_levels(dev_priv);
1951 	/*
1952 	 * Note that enabling cxsr with no primary/sprite planes
1953 	 * enabled can wedge the pipe. Hence we only allow cxsr
1954 	 * with exactly one enabled primary/sprite plane.
1955 	 */
1956 	wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;
1957 
1958 	for (level = 0; level < wm_state->num_levels; level++) {
1959 		const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1960 		const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
1961 
1962 		if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
1963 			break;
1964 
1965 		for_each_plane_id_on_crtc(crtc, plane_id) {
1966 			wm_state->wm[level].plane[plane_id] =
1967 				vlv_invert_wm_value(raw->plane[plane_id],
1968 						    fifo_state->plane[plane_id]);
1969 		}
1970 
1971 		wm_state->sr[level].plane =
1972 			vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
1973 						 raw->plane[PLANE_SPRITE0],
1974 						 raw->plane[PLANE_SPRITE1]),
1975 					    sr_fifo_size);
1976 
1977 		wm_state->sr[level].cursor =
1978 			vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
1979 					    63);
1980 	}
1981 
1982 	if (level == 0)
1983 		return -EINVAL;
1984 
1985 	/* limit to only levels we can actually handle */
1986 	wm_state->num_levels = level;
1987 
1988 	/* invalidate the higher levels */
1989 	vlv_invalidate_wms(crtc, wm_state, level);
1990 
1991 	return 0;
1992 }
1993 
1994 #define VLV_FIFO(plane, value) \
1995 	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
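/*
 * Example expansion of the macro above:
 *
 *   VLV_FIFO(SPRITEB, sprite1_start)
 *     => ((sprite1_start << DSPARB_SPRITEB_SHIFT_VLV) &
 *         DSPARB_SPRITEB_MASK_VLV)
 *
 * The 9-bit FIFO split points are spread across two registers: the
 * low 8 bits live in DSPARB/DSPARB3 and the 9th bit in the matching
 * *_HI field of DSPARB2, which is why the function below writes both
 * the raw value and 'value >> 8'.
 */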
1996 
1997 static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
1998 				   struct intel_crtc *crtc)
1999 {
2000 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2001 	struct intel_uncore *uncore = &dev_priv->uncore;
2002 	const struct intel_crtc_state *crtc_state =
2003 		intel_atomic_get_new_crtc_state(state, crtc);
2004 	const struct vlv_fifo_state *fifo_state =
2005 		&crtc_state->wm.vlv.fifo_state;
2006 	int sprite0_start, sprite1_start, fifo_size;
2007 	u32 dsparb, dsparb2, dsparb3;
2008 
2009 	if (!crtc_state->fifo_changed)
2010 		return;
2011 
2012 	sprite0_start = fifo_state->plane[PLANE_PRIMARY];
2013 	sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
2014 	fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
2015 
2016 	drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
2017 	drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
2018 
2019 	trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
2020 
2021 	/*
2022 	 * uncore.lock serves a double purpose here. It allows us to
2023 	 * use the less expensive I915_{READ,WRITE}_FW() functions, and
2024 	 * it protects the DSPARB registers from getting clobbered by
2025 	 * parallel updates from multiple pipes.
2026 	 *
2027 	 * intel_pipe_update_start() has already disabled interrupts
2028 	 * for us, so a plain spin_lock() is sufficient here.
2029 	 */
2030 	spin_lock(&uncore->lock);
2031 
2032 	switch (crtc->pipe) {
2033 	case PIPE_A:
2034 		dsparb = intel_uncore_read_fw(uncore, DSPARB);
2035 		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
2036 
2037 		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
2038 			    VLV_FIFO(SPRITEB, 0xff));
2039 		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
2040 			   VLV_FIFO(SPRITEB, sprite1_start));
2041 
2042 		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
2043 			     VLV_FIFO(SPRITEB_HI, 0x1));
2044 		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
2045 			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
2046 
2047 		intel_uncore_write_fw(uncore, DSPARB, dsparb);
2048 		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
2049 		break;
2050 	case PIPE_B:
2051 		dsparb = intel_uncore_read_fw(uncore, DSPARB);
2052 		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
2053 
2054 		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
2055 			    VLV_FIFO(SPRITED, 0xff));
2056 		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
2057 			   VLV_FIFO(SPRITED, sprite1_start));
2058 
2059 		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
2060 			     VLV_FIFO(SPRITED_HI, 0xff));
2061 		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
2062 			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
2063 
2064 		intel_uncore_write_fw(uncore, DSPARB, dsparb);
2065 		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
2066 		break;
2067 	case PIPE_C:
2068 		dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
2069 		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
2070 
2071 		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
2072 			     VLV_FIFO(SPRITEF, 0xff));
2073 		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
2074 			    VLV_FIFO(SPRITEF, sprite1_start));
2075 
2076 		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
2077 			     VLV_FIFO(SPRITEF_HI, 0xff));
2078 		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
2079 			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
2080 
2081 		intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
2082 		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
2083 		break;
2084 	default:
2085 		break;
2086 	}
2087 
2088 	intel_uncore_posting_read_fw(uncore, DSPARB);
2089 
2090 	spin_unlock(&uncore->lock);
2091 }
2092 
2093 #undef VLV_FIFO
2094 
2095 static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
2096 {
2097 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2098 	struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
2099 	const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
2100 	struct intel_atomic_state *intel_state =
2101 		to_intel_atomic_state(new_crtc_state->uapi.state);
2102 	const struct intel_crtc_state *old_crtc_state =
2103 		intel_atomic_get_old_crtc_state(intel_state, crtc);
2104 	const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
2105 	int level;
2106 
2107 	if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
2108 		*intermediate = *optimal;
2109 
2110 		intermediate->cxsr = false;
2111 		goto out;
2112 	}
2113 
2114 	intermediate->num_levels = min(optimal->num_levels, active->num_levels);
2115 	intermediate->cxsr = optimal->cxsr && active->cxsr &&
2116 		!new_crtc_state->disable_cxsr;
2117 
2118 	for (level = 0; level < intermediate->num_levels; level++) {
2119 		enum plane_id plane_id;
2120 
2121 		for_each_plane_id_on_crtc(crtc, plane_id) {
2122 			intermediate->wm[level].plane[plane_id] =
2123 				min(optimal->wm[level].plane[plane_id],
2124 				    active->wm[level].plane[plane_id]);
2125 		}
2126 
2127 		intermediate->sr[level].plane = min(optimal->sr[level].plane,
2128 						    active->sr[level].plane);
2129 		intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
2130 						     active->sr[level].cursor);
2131 	}
2132 
2133 	vlv_invalidate_wms(crtc, intermediate, level);
2134 
2135 out:
2136 	/*
2137 	 * If our intermediate WMs are identical to the final WMs, then we can
2138 	 * omit the post-vblank programming; only update if it's different.
2139 	 */
2140 	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
2141 		new_crtc_state->wm.need_postvbl_update = true;
2142 
2143 	return 0;
2144 }
2145 
2146 static void vlv_merge_wm(struct drm_i915_private *dev_priv,
2147 			 struct vlv_wm_values *wm)
2148 {
2149 	struct intel_crtc *crtc;
2150 	int num_active_pipes = 0;
2151 
2152 	wm->level = dev_priv->wm.max_level;
2153 	wm->cxsr = true;
2154 
2155 	for_each_intel_crtc(&dev_priv->drm, crtc) {
2156 		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2157 
2158 		if (!crtc->active)
2159 			continue;
2160 
2161 		if (!wm_state->cxsr)
2162 			wm->cxsr = false;
2163 
2164 		num_active_pipes++;
2165 		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
2166 	}
2167 
2168 	if (num_active_pipes != 1)
2169 		wm->cxsr = false;
2170 
2171 	if (num_active_pipes > 1)
2172 		wm->level = VLV_WM_LEVEL_PM2;
2173 
2174 	for_each_intel_crtc(&dev_priv->drm, crtc) {
2175 		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2176 		enum pipe pipe = crtc->pipe;
2177 
2178 		wm->pipe[pipe] = wm_state->wm[wm->level];
2179 		if (crtc->active && wm->cxsr)
2180 			wm->sr = wm_state->sr[wm->level];
2181 
2182 		wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
2183 		wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
2184 		wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
2185 		wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
2186 	}
2187 }
2188 
2189 static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
2190 {
2191 	struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
2192 	struct vlv_wm_values new_wm = {};
2193 
2194 	vlv_merge_wm(dev_priv, &new_wm);
2195 
2196 	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
2197 		return;
2198 
2199 	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2200 		chv_set_memory_dvfs(dev_priv, false);
2201 
2202 	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2203 		chv_set_memory_pm5(dev_priv, false);
2204 
2205 	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
2206 		_intel_set_memory_cxsr(dev_priv, false);
2207 
2208 	vlv_write_wm_values(dev_priv, &new_wm);
2209 
2210 	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
2211 		_intel_set_memory_cxsr(dev_priv, true);
2212 
2213 	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2214 		chv_set_memory_pm5(dev_priv, true);
2215 
2216 	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2217 		chv_set_memory_dvfs(dev_priv, true);
2218 
2219 	*old_wm = new_wm;
2220 }
2221 
2222 static void vlv_initial_watermarks(struct intel_atomic_state *state,
2223 				   struct intel_crtc *crtc)
2224 {
2225 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2226 	const struct intel_crtc_state *crtc_state =
2227 		intel_atomic_get_new_crtc_state(state, crtc);
2228 
2229 	mutex_lock(&dev_priv->wm.wm_mutex);
2230 	crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
2231 	vlv_program_watermarks(dev_priv);
2232 	mutex_unlock(&dev_priv->wm.wm_mutex);
2233 }
2234 
2235 static void vlv_optimize_watermarks(struct intel_atomic_state *state,
2236 				    struct intel_crtc *crtc)
2237 {
2238 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2239 	const struct intel_crtc_state *crtc_state =
2240 		intel_atomic_get_new_crtc_state(state, crtc);
2241 
2242 	if (!crtc_state->wm.need_postvbl_update)
2243 		return;
2244 
2245 	mutex_lock(&dev_priv->wm.wm_mutex);
2246 	crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
2247 	vlv_program_watermarks(dev_priv);
2248 	mutex_unlock(&dev_priv->wm.wm_mutex);
2249 }
2250 
2251 static void i965_update_wm(struct intel_crtc *unused_crtc)
2252 {
2253 	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2254 	struct intel_crtc *crtc;
2255 	int srwm = 1;
2256 	int cursor_sr = 16;
2257 	bool cxsr_enabled;
2258 
2259 	/* Calc SR entries for single-plane configs */
2260 	crtc = single_enabled_crtc(dev_priv);
2261 	if (crtc) {
2262 		/* self-refresh has much higher latency */
2263 		static const int sr_latency_ns = 12000;
2264 		const struct drm_display_mode *adjusted_mode =
2265 			&crtc->config->hw.adjusted_mode;
2266 		const struct drm_framebuffer *fb =
2267 			crtc->base.primary->state->fb;
2268 		int clock = adjusted_mode->crtc_clock;
2269 		int htotal = adjusted_mode->crtc_htotal;
2270 		int hdisplay = crtc->config->pipe_src_w;
2271 		int cpp = fb->format->cpp[0];
2272 		int entries;
2273 
2274 		entries = intel_wm_method2(clock, htotal,
2275 					   hdisplay, cpp, sr_latency_ns / 100);
2276 		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
2277 		srwm = I965_FIFO_SIZE - entries;
2278 		if (srwm < 0)
2279 			srwm = 1;
2280 		srwm &= 0x1ff;
2281 		drm_dbg_kms(&dev_priv->drm,
2282 			    "self-refresh entries: %d, wm: %d\n",
2283 			    entries, srwm);
2284 
2285 		entries = intel_wm_method2(clock, htotal,
2286 					   crtc->base.cursor->state->crtc_w, 4,
2287 					   sr_latency_ns / 100);
2288 		entries = DIV_ROUND_UP(entries,
2289 				       i965_cursor_wm_info.cacheline_size) +
2290 			i965_cursor_wm_info.guard_size;
2291 
2292 		cursor_sr = i965_cursor_wm_info.fifo_size - entries;
2293 		if (cursor_sr > i965_cursor_wm_info.max_wm)
2294 			cursor_sr = i965_cursor_wm_info.max_wm;
2295 
2296 		drm_dbg_kms(&dev_priv->drm,
2297 			    "self-refresh watermark: display plane %d "
2298 			    "cursor %d\n", srwm, cursor_sr);
2299 
2300 		cxsr_enabled = true;
2301 	} else {
2302 		cxsr_enabled = false;
2303 		/* Turn off self refresh if both pipes are enabled */
2304 		intel_set_memory_cxsr(dev_priv, false);
2305 	}
2306 
2307 	drm_dbg_kms(&dev_priv->drm,
2308 		    "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
2309 		    srwm);
2310 
2311 	/* 965 has limitations... */
2312 	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
2313 		   FW_WM(8, CURSORB) |
2314 		   FW_WM(8, PLANEB) |
2315 		   FW_WM(8, PLANEA));
2316 	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
2317 		   FW_WM(8, PLANEC_OLD));
2318 	/* update cursor SR watermark */
2319 	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
2320 
2321 	if (cxsr_enabled)
2322 		intel_set_memory_cxsr(dev_priv, true);
2323 }
2324 
2325 #undef FW_WM
2326 
2327 static void i9xx_update_wm(struct intel_crtc *unused_crtc)
2328 {
2329 	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2330 	const struct intel_watermark_params *wm_info;
2331 	u32 fwater_lo;
2332 	u32 fwater_hi;
2333 	int cwm, srwm = 1;
2334 	int fifo_size;
2335 	int planea_wm, planeb_wm;
2336 	struct intel_crtc *crtc, *enabled = NULL;
2337 
2338 	if (IS_I945GM(dev_priv))
2339 		wm_info = &i945_wm_info;
2340 	else if (!IS_GEN(dev_priv, 2))
2341 		wm_info = &i915_wm_info;
2342 	else
2343 		wm_info = &i830_a_wm_info;
2344 
2345 	fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A);
2346 	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
2347 	if (intel_crtc_active(crtc)) {
2348 		const struct drm_display_mode *adjusted_mode =
2349 			&crtc->config->hw.adjusted_mode;
2350 		const struct drm_framebuffer *fb =
2351 			crtc->base.primary->state->fb;
2352 		int cpp;
2353 
2354 		if (IS_GEN(dev_priv, 2))
2355 			cpp = 4;
2356 		else
2357 			cpp = fb->format->cpp[0];
2358 
2359 		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2360 					       wm_info, fifo_size, cpp,
2361 					       pessimal_latency_ns);
2362 		enabled = crtc;
2363 	} else {
2364 		planea_wm = fifo_size - wm_info->guard_size;
2365 		if (planea_wm > (long)wm_info->max_wm)
2366 			planea_wm = wm_info->max_wm;
2367 	}
2368 
2369 	if (IS_GEN(dev_priv, 2))
2370 		wm_info = &i830_bc_wm_info;
2371 
2372 	fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
2373 	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
2374 	if (intel_crtc_active(crtc)) {
2375 		const struct drm_display_mode *adjusted_mode =
2376 			&crtc->config->hw.adjusted_mode;
2377 		const struct drm_framebuffer *fb =
2378 			crtc->base.primary->state->fb;
2379 		int cpp;
2380 
2381 		if (IS_GEN(dev_priv, 2))
2382 			cpp = 4;
2383 		else
2384 			cpp = fb->format->cpp[0];
2385 
2386 		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2387 					       wm_info, fifo_size, cpp,
2388 					       pessimal_latency_ns);
2389 		if (enabled == NULL)
2390 			enabled = crtc;
2391 		else
2392 			enabled = NULL;
2393 	} else {
2394 		planeb_wm = fifo_size - wm_info->guard_size;
2395 		if (planeb_wm > (long)wm_info->max_wm)
2396 			planeb_wm = wm_info->max_wm;
2397 	}
2398 
2399 	drm_dbg_kms(&dev_priv->drm,
2400 		    "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2401 
2402 	if (IS_I915GM(dev_priv) && enabled) {
2403 		struct drm_i915_gem_object *obj;
2404 
2405 		obj = intel_fb_obj(enabled->base.primary->state->fb);
2406 
2407 		/* self-refresh seems busted with untiled */
2408 		if (!i915_gem_object_is_tiled(obj))
2409 			enabled = NULL;
2410 	}
2411 
2412 	/*
2413 	 * Overlay gets an aggressive default since video jitter is bad.
2414 	 */
2415 	cwm = 2;
2416 
2417 	/* Play safe and disable self-refresh before adjusting watermarks. */
2418 	intel_set_memory_cxsr(dev_priv, false);
2419 
2420 	/* Calc SR entries for single-plane configs */
2421 	if (HAS_FW_BLC(dev_priv) && enabled) {
2422 		/* self-refresh has much higher latency */
2423 		static const int sr_latency_ns = 6000;
2424 		const struct drm_display_mode *adjusted_mode =
2425 			&enabled->config->hw.adjusted_mode;
2426 		const struct drm_framebuffer *fb =
2427 			enabled->base.primary->state->fb;
2428 		int clock = adjusted_mode->crtc_clock;
2429 		int htotal = adjusted_mode->crtc_htotal;
2430 		int hdisplay = enabled->config->pipe_src_w;
2431 		int cpp;
2432 		int entries;
2433 
2434 		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
2435 			cpp = 4;
2436 		else
2437 			cpp = fb->format->cpp[0];
2438 
2439 		entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
2440 					   sr_latency_ns / 100);
2441 		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
2442 		drm_dbg_kms(&dev_priv->drm,
2443 			    "self-refresh entries: %d\n", entries);
2444 		srwm = wm_info->fifo_size - entries;
2445 		if (srwm < 0)
2446 			srwm = 1;
2447 
2448 		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
2449 			I915_WRITE(FW_BLC_SELF,
2450 				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2451 		else
2452 			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
2453 	}
2454 
2455 	drm_dbg_kms(&dev_priv->drm,
2456 		    "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
2457 		     planea_wm, planeb_wm, cwm, srwm);
2458 
2459 	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
2460 	fwater_hi = (cwm & 0x1f);
2461 
2462 	/* Set request length to 8 cachelines per fetch */
2463 	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
2464 	fwater_hi = fwater_hi | (1 << 8);
2465 
2466 	I915_WRITE(FW_BLC, fwater_lo);
2467 	I915_WRITE(FW_BLC2, fwater_hi);
2468 
2469 	if (enabled)
2470 		intel_set_memory_cxsr(dev_priv, true);
2471 }
2472 
2473 static void i845_update_wm(struct intel_crtc *unused_crtc)
2474 {
2475 	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2476 	struct intel_crtc *crtc;
2477 	const struct drm_display_mode *adjusted_mode;
2478 	u32 fwater_lo;
2479 	int planea_wm;
2480 
2481 	crtc = single_enabled_crtc(dev_priv);
2482 	if (crtc == NULL)
2483 		return;
2484 
2485 	adjusted_mode = &crtc->config->hw.adjusted_mode;
2486 	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2487 				       &i845_wm_info,
2488 				       dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
2489 				       4, pessimal_latency_ns);
2490 	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
2491 	fwater_lo |= (3<<8) | planea_wm;
2492 
2493 	drm_dbg_kms(&dev_priv->drm,
2494 		    "Setting FIFO watermarks - A: %d\n", planea_wm);
2495 
2496 	I915_WRITE(FW_BLC, fwater_lo);
2497 }
2498 
2499 /* latency must be in 0.1us units. */
2500 static unsigned int ilk_wm_method1(unsigned int pixel_rate,
2501 				   unsigned int cpp,
2502 				   unsigned int latency)
2503 {
2504 	unsigned int ret;
2505 
2506 	ret = intel_wm_method1(pixel_rate, cpp, latency);
2507 	ret = DIV_ROUND_UP(ret, 64) + 2;
2508 
2509 	return ret;
2510 }
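/*
 * Worked example for ilk_wm_method1() above (hypothetical values,
 * assuming intel_wm_method1() earlier in this file computes
 * pixel_rate * cpp * latency / 10000):
 *
 *   pixel_rate = 148500 kHz, cpp = 4, latency = 35 (3.5 usec)
 *
 *   bytes = 148500 * 4 * 35 / 10000 = 2079
 *   wm    = DIV_ROUND_UP(2079, 64) + 2 = 35 FIFO entries
 */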
2511 
2512 /* latency must be in 0.1us units. */
2513 static unsigned int ilk_wm_method2(unsigned int pixel_rate,
2514 				   unsigned int htotal,
2515 				   unsigned int width,
2516 				   unsigned int cpp,
2517 				   unsigned int latency)
2518 {
2519 	unsigned int ret;
2520 
2521 	ret = intel_wm_method2(pixel_rate, htotal,
2522 			       width, cpp, latency);
2523 	ret = DIV_ROUND_UP(ret, 64) + 2;
2524 
2525 	return ret;
2526 }
2527 
2528 static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
2529 {
2530 	/*
2531 	 * Neither of these should be possible since this function shouldn't be
2532 	 * called if the CRTC is off or the plane is invisible.  But let's be
2533 	 * extra paranoid to avoid a potential divide-by-zero if we screw up
2534 	 * elsewhere in the driver.
2535 	 */
2536 	if (WARN_ON(!cpp))
2537 		return 0;
2538 	if (WARN_ON(!horiz_pixels))
2539 		return 0;
2540 
2541 	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
2542 }
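/*
 * Worked example for ilk_wm_fbc() above (hypothetical values):
 *
 *   pri_val = 40 cachelines, horiz_pixels = 1920, cpp = 4
 *
 *   DIV_ROUND_UP(40 * 64, 1920 * 4) + 2 = DIV_ROUND_UP(2560, 7680) + 2
 *                                       = 1 + 2 = 3
 */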
2543 
2544 struct ilk_wm_maximums {
2545 	u16 pri;
2546 	u16 spr;
2547 	u16 cur;
2548 	u16 fbc;
2549 };
2550 
2551 /*
2552  * For both WM_PIPE and WM_LP.
2553  * mem_value must be in 0.1us units.
2554  */
2555 static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
2556 			      const struct intel_plane_state *plane_state,
2557 			      u32 mem_value, bool is_lp)
2558 {
2559 	u32 method1, method2;
2560 	int cpp;
2561 
2562 	if (mem_value == 0)
2563 		return U32_MAX;
2564 
2565 	if (!intel_wm_plane_visible(crtc_state, plane_state))
2566 		return 0;
2567 
2568 	cpp = plane_state->hw.fb->format->cpp[0];
2569 
2570 	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
2571 
2572 	if (!is_lp)
2573 		return method1;
2574 
2575 	method2 = ilk_wm_method2(crtc_state->pixel_rate,
2576 				 crtc_state->hw.adjusted_mode.crtc_htotal,
2577 				 drm_rect_width(&plane_state->uapi.dst),
2578 				 cpp, mem_value);
2579 
2580 	return min(method1, method2);
2581 }
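/*
 * Method 1 above sizes the watermark purely from the bytes consumed
 * during the memory latency window, while method 2 rounds the latency
 * up to whole scanlines; for LP1+ levels the smaller of the two
 * estimates suffices, hence the min().
 */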
2582 
2583 /*
2584  * For both WM_PIPE and WM_LP.
2585  * mem_value must be in 0.1us units.
2586  */
2587 static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
2588 			      const struct intel_plane_state *plane_state,
2589 			      u32 mem_value)
2590 {
2591 	u32 method1, method2;
2592 	int cpp;
2593 
2594 	if (mem_value == 0)
2595 		return U32_MAX;
2596 
2597 	if (!intel_wm_plane_visible(crtc_state, plane_state))
2598 		return 0;
2599 
2600 	cpp = plane_state->hw.fb->format->cpp[0];
2601 
2602 	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
2603 	method2 = ilk_wm_method2(crtc_state->pixel_rate,
2604 				 crtc_state->hw.adjusted_mode.crtc_htotal,
2605 				 drm_rect_width(&plane_state->uapi.dst),
2606 				 cpp, mem_value);
2607 	return min(method1, method2);
2608 }
2609 
2610 /*
2611  * For both WM_PIPE and WM_LP.
2612  * mem_value must be in 0.1us units.
2613  */
2614 static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
2615 			      const struct intel_plane_state *plane_state,
2616 			      u32 mem_value)
2617 {
2618 	int cpp;
2619 
2620 	if (mem_value == 0)
2621 		return U32_MAX;
2622 
2623 	if (!intel_wm_plane_visible(crtc_state, plane_state))
2624 		return 0;
2625 
2626 	cpp = plane_state->hw.fb->format->cpp[0];
2627 
2628 	return ilk_wm_method2(crtc_state->pixel_rate,
2629 			      crtc_state->hw.adjusted_mode.crtc_htotal,
2630 			      drm_rect_width(&plane_state->uapi.dst),
2631 			      cpp, mem_value);
2632 }
2633 
2634 /* Only for WM_LP. */
2635 static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
2636 			      const struct intel_plane_state *plane_state,
2637 			      u32 pri_val)
2638 {
2639 	int cpp;
2640 
2641 	if (!intel_wm_plane_visible(crtc_state, plane_state))
2642 		return 0;
2643 
2644 	cpp = plane_state->hw.fb->format->cpp[0];
2645 
2646 	return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst),
2647 			  cpp);
2648 }
2649 
2650 static unsigned int
2651 ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
2652 {
2653 	if (INTEL_GEN(dev_priv) >= 8)
2654 		return 3072;
2655 	else if (INTEL_GEN(dev_priv) >= 7)
2656 		return 768;
2657 	else
2658 		return 512;
2659 }
2660 
2661 static unsigned int
2662 ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
2663 		     int level, bool is_sprite)
2664 {
2665 	if (INTEL_GEN(dev_priv) >= 8)
2666 		/* BDW primary/sprite plane watermarks */
2667 		return level == 0 ? 255 : 2047;
2668 	else if (INTEL_GEN(dev_priv) >= 7)
2669 		/* IVB/HSW primary/sprite plane watermarks */
2670 		return level == 0 ? 127 : 1023;
2671 	else if (!is_sprite)
2672 		/* ILK/SNB primary plane watermarks */
2673 		return level == 0 ? 127 : 511;
2674 	else
2675 		/* ILK/SNB sprite plane watermarks */
2676 		return level == 0 ? 63 : 255;
2677 }
2678 
2679 static unsigned int
2680 ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
2681 {
2682 	if (INTEL_GEN(dev_priv) >= 7)
2683 		return level == 0 ? 63 : 255;
2684 	else
2685 		return level == 0 ? 31 : 63;
2686 }
2687 
2688 static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
2689 {
2690 	if (INTEL_GEN(dev_priv) >= 8)
2691 		return 31;
2692 	else
2693 		return 15;
2694 }
2695 
2696 /* Calculate the maximum primary/sprite plane watermark */
2697 static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
2698 				     int level,
2699 				     const struct intel_wm_config *config,
2700 				     enum intel_ddb_partitioning ddb_partitioning,
2701 				     bool is_sprite)
2702 {
2703 	unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
2704 
2705 	/* if sprites aren't enabled, sprites get nothing */
2706 	if (is_sprite && !config->sprites_enabled)
2707 		return 0;
2708 
2709 	/* HSW allows LP1+ watermarks even with multiple pipes */
2710 	if (level == 0 || config->num_pipes_active > 1) {
2711 		fifo_size /= INTEL_NUM_PIPES(dev_priv);
2712 
2713 		/*
2714 		 * For some reason the non self refresh
2715 		 * FIFO size is only half of the self
2716 		 * refresh FIFO size on ILK/SNB.
2717 		 */
2718 		if (INTEL_GEN(dev_priv) <= 6)
2719 			fifo_size /= 2;
2720 	}
2721 
2722 	if (config->sprites_enabled) {
2723 		/* level 0 is always calculated with 1:1 split */
2724 		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2725 			if (is_sprite)
2726 				fifo_size *= 5;
2727 			fifo_size /= 6;
2728 		} else {
2729 			fifo_size /= 2;
2730 		}
2731 	}
2732 
2733 	/* clamp to max that the registers can hold */
2734 	return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
2735 }
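/*
 * Example of the 5/6 split above (hypothetical IVB config): one
 * active pipe, sprites enabled, level > 0, INTEL_DDB_PART_5_6:
 *
 *   fifo_size = 768
 *   sprite : 768 * 5 / 6 = 640
 *   primary: 768 / 6     = 128
 *
 * With INTEL_DDB_PART_1_2 both would instead get 768 / 2 = 384.
 */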
2736 
2737 /* Calculate the maximum cursor plane watermark */
2738 static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
2739 				      int level,
2740 				      const struct intel_wm_config *config)
2741 {
2742 	/* HSW LP1+ watermarks w/ multiple pipes */
2743 	if (level > 0 && config->num_pipes_active > 1)
2744 		return 64;
2745 
2746 	/* otherwise just report max that registers can hold */
2747 	return ilk_cursor_wm_reg_max(dev_priv, level);
2748 }
2749 
2750 static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
2751 				    int level,
2752 				    const struct intel_wm_config *config,
2753 				    enum intel_ddb_partitioning ddb_partitioning,
2754 				    struct ilk_wm_maximums *max)
2755 {
2756 	max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
2757 	max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
2758 	max->cur = ilk_cursor_wm_max(dev_priv, level, config);
2759 	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2760 }
2761 
2762 static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
2763 					int level,
2764 					struct ilk_wm_maximums *max)
2765 {
2766 	max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
2767 	max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
2768 	max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
2769 	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2770 }
2771 
2772 static bool ilk_validate_wm_level(int level,
2773 				  const struct ilk_wm_maximums *max,
2774 				  struct intel_wm_level *result)
2775 {
2776 	bool ret;
2777 
2778 	/* already determined to be invalid? */
2779 	if (!result->enable)
2780 		return false;
2781 
2782 	result->enable = result->pri_val <= max->pri &&
2783 			 result->spr_val <= max->spr &&
2784 			 result->cur_val <= max->cur;
2785 
2786 	ret = result->enable;
2787 
2788 	/*
2789 	 * HACK until we can pre-compute everything,
2790 	 * and thus fail gracefully if LP0 watermarks
2791 	 * are exceeded...
2792 	 */
2793 	if (level == 0 && !result->enable) {
2794 		if (result->pri_val > max->pri)
2795 			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2796 				      level, result->pri_val, max->pri);
2797 		if (result->spr_val > max->spr)
2798 			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2799 				      level, result->spr_val, max->spr);
2800 		if (result->cur_val > max->cur)
2801 			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2802 				      level, result->cur_val, max->cur);
2803 
2804 		result->pri_val = min_t(u32, result->pri_val, max->pri);
2805 		result->spr_val = min_t(u32, result->spr_val, max->spr);
2806 		result->cur_val = min_t(u32, result->cur_val, max->cur);
2807 		result->enable = true;
2808 	}
2809 
2810 	return ret;
2811 }
2812 
2813 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2814 				 const struct intel_crtc *crtc,
2815 				 int level,
2816 				 struct intel_crtc_state *crtc_state,
2817 				 const struct intel_plane_state *pristate,
2818 				 const struct intel_plane_state *sprstate,
2819 				 const struct intel_plane_state *curstate,
2820 				 struct intel_wm_level *result)
2821 {
2822 	u16 pri_latency = dev_priv->wm.pri_latency[level];
2823 	u16 spr_latency = dev_priv->wm.spr_latency[level];
2824 	u16 cur_latency = dev_priv->wm.cur_latency[level];
2825 
2826 	/* WM1+ latency values stored in 0.5us units */
2827 	if (level > 0) {
2828 		pri_latency *= 5;
2829 		spr_latency *= 5;
2830 		cur_latency *= 5;
2831 	}
2832 
2833 	if (pristate) {
2834 		result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
2835 						     pri_latency, level);
2836 		result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
2837 	}
2838 
2839 	if (sprstate)
2840 		result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
2841 
2842 	if (curstate)
2843 		result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
2844 
2845 	result->enable = true;
2846 }
2847 
2848 static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
2849 				  u16 wm[])
2850 {
2851 	struct intel_uncore *uncore = &dev_priv->uncore;
2852 
2853 	if (INTEL_GEN(dev_priv) >= 9) {
2854 		u32 val;
2855 		int ret, i;
2856 		int level, max_level = ilk_wm_max_level(dev_priv);
2857 
2858 		/* read the first set of memory latencies[0:3] */
2859 		val = 0; /* data0 to be programmed to 0 for first set */
2860 		ret = sandybridge_pcode_read(dev_priv,
2861 					     GEN9_PCODE_READ_MEM_LATENCY,
2862 					     &val, NULL);
2863 
2864 		if (ret) {
2865 			drm_err(&dev_priv->drm,
2866 				"SKL Mailbox read error = %d\n", ret);
2867 			return;
2868 		}
2869 
2870 		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2871 		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2872 				GEN9_MEM_LATENCY_LEVEL_MASK;
2873 		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2874 				GEN9_MEM_LATENCY_LEVEL_MASK;
2875 		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2876 				GEN9_MEM_LATENCY_LEVEL_MASK;
2877 
2878 		/* read the second set of memory latencies[4:7] */
2879 		val = 1; /* data0 to be programmed to 1 for second set */
2880 		ret = sandybridge_pcode_read(dev_priv,
2881 					     GEN9_PCODE_READ_MEM_LATENCY,
2882 					     &val, NULL);
2883 		if (ret) {
2884 			drm_err(&dev_priv->drm,
2885 				"SKL Mailbox read error = %d\n", ret);
2886 			return;
2887 		}
2888 
2889 		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2890 		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2891 				GEN9_MEM_LATENCY_LEVEL_MASK;
2892 		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2893 				GEN9_MEM_LATENCY_LEVEL_MASK;
2894 		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2895 				GEN9_MEM_LATENCY_LEVEL_MASK;
2896 
2897 		/*
2898 		 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
2899 		 * need to be disabled. We make sure to sanitize the values out
2900 		 * of the punit to satisfy this requirement.
2901 		 */
2902 		for (level = 1; level <= max_level; level++) {
2903 			if (wm[level] == 0) {
2904 				for (i = level + 1; i <= max_level; i++)
2905 					wm[i] = 0;
2906 				break;
2907 			}
2908 		}
2909 
2910 		/*
2911 		 * WaWmMemoryReadLatency:skl+,glk
2912 		 *
2913 		 * punit doesn't take into account the read latency so we need
2914 		 * to add 2us to the various latency levels we retrieve from the
2915 	 * punit when level 0 response data is 0us.
2916 		 */
2917 		if (wm[0] == 0) {
2918 			wm[0] += 2;
2919 			for (level = 1; level <= max_level; level++) {
2920 				if (wm[level] == 0)
2921 					break;
2922 				wm[level] += 2;
2923 			}
2924 		}
2925 
2926 		/*
2927 		 * WA Level-0 adjustment for 16GB DIMMs: SKL+
2928 		 * Add 1us to level 0 when 16GB DIMMs are present. If the DIMM
2929 		 * info could not be read, a 16GB DIMM is assumed so that any
2930 		 * underrun is avoided.
2931 		 */
2932 		if (dev_priv->dram_info.is_16gb_dimm)
2933 			wm[0] += 1;
2934 
2935 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2936 		u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
2937 
2938 		wm[0] = (sskpd >> 56) & 0xFF;
2939 		if (wm[0] == 0)
2940 			wm[0] = sskpd & 0xF;
2941 		wm[1] = (sskpd >> 4) & 0xFF;
2942 		wm[2] = (sskpd >> 12) & 0xFF;
2943 		wm[3] = (sskpd >> 20) & 0x1FF;
2944 		wm[4] = (sskpd >> 32) & 0x1FF;
2945 	} else if (INTEL_GEN(dev_priv) >= 6) {
2946 		u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);
2947 
2948 		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2949 		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2950 		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2951 		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2952 	} else if (INTEL_GEN(dev_priv) >= 5) {
2953 		u32 mltr = intel_uncore_read(uncore, MLTR_ILK);
2954 
2955 		/* ILK primary LP0 latency is 700 ns */
2956 		wm[0] = 7;
2957 		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2958 		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2959 	} else {
2960 		MISSING_CASE(INTEL_DEVID(dev_priv));
2961 	}
2962 }
2963 
2964 static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
2965 				       u16 wm[5])
2966 {
2967 	/* ILK sprite LP0 latency is 1300 ns */
2968 	if (IS_GEN(dev_priv, 5))
2969 		wm[0] = 13;
2970 }
2971 
2972 static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
2973 				       u16 wm[5])
2974 {
2975 	/* ILK cursor LP0 latency is 1300 ns */
2976 	if (IS_GEN(dev_priv, 5))
2977 		wm[0] = 13;
2978 }
2979 
2980 int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
2981 {
2982 	/* how many WM levels are we expecting */
2983 	if (INTEL_GEN(dev_priv) >= 9)
2984 		return 7;
2985 	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2986 		return 4;
2987 	else if (INTEL_GEN(dev_priv) >= 6)
2988 		return 3;
2989 	else
2990 		return 2;
2991 }
2992 
2993 static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
2994 				   const char *name,
2995 				   const u16 wm[])
2996 {
2997 	int level, max_level = ilk_wm_max_level(dev_priv);
2998 
2999 	for (level = 0; level <= max_level; level++) {
3000 		unsigned int latency = wm[level];
3001 
3002 		if (latency == 0) {
3003 			drm_dbg_kms(&dev_priv->drm,
3004 				    "%s WM%d latency not provided\n",
3005 				    name, level);
3006 			continue;
3007 		}
3008 
3009 		/*
3010 		 * - latencies are in us on gen9.
3011 		 * - before then, WM1+ latency values are in 0.5us units
3012 		 */
3013 		if (INTEL_GEN(dev_priv) >= 9)
3014 			latency *= 10;
3015 		else if (level > 0)
3016 			latency *= 5;
3017 
3018 		drm_dbg_kms(&dev_priv->drm,
3019 			    "%s WM%d latency %u (%u.%u usec)\n", name, level,
3020 			    wm[level], latency / 10, latency % 10);
3021 	}
3022 }
3023 
3024 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
3025 				    u16 wm[5], u16 min)
3026 {
3027 	int level, max_level = ilk_wm_max_level(dev_priv);
3028 
3029 	if (wm[0] >= min)
3030 		return false;
3031 
3032 	wm[0] = max(wm[0], min);
3033 	for (level = 1; level <= max_level; level++)
3034 		wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
3035 
3036 	return true;
3037 }
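/*
 * Example with the min of 12 (1.2 usec) used by snb_wm_latency_quirk()
 * below: wm[0] is raised to at least 12 (0.1 usec units), and every
 * WM1+ level to at least DIV_ROUND_UP(12, 5) = 3 (0.5 usec units),
 * i.e. 1.5 usec.
 */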
3038 
3039 static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
3040 {
3041 	bool changed;
3042 
3043 	/*
3044 	 * The BIOS provided WM memory latency values are often
3045 	 * inadequate for high resolution displays. Adjust them.
3046 	 */
3047 	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12);
3048 	changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12);
3049 	changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
3050 
3051 	if (!changed)
3052 		return;
3053 
3054 	drm_dbg_kms(&dev_priv->drm,
3055 		    "WM latency values increased to avoid potential underruns\n");
3056 	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3057 	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3058 	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3059 }
3060 
3061 static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
3062 {
3063 	/*
3064 	 * On some SNB machines (Thinkpad X220 Tablet at least)
3065 	 * LP3 usage can cause vblank interrupts to be lost.
3066 	 * The DEIIR bit will go high but it looks like the CPU
3067 	 * never gets interrupted.
3068 	 *
3069 	 * It's not clear whether other interrupt sources could
3070 	 * be affected or if this is somehow limited to vblank
3071 	 * interrupts only. To play it safe we disable LP3
3072 	 * watermarks entirely.
3073 	 */
3074 	if (dev_priv->wm.pri_latency[3] == 0 &&
3075 	    dev_priv->wm.spr_latency[3] == 0 &&
3076 	    dev_priv->wm.cur_latency[3] == 0)
3077 		return;
3078 
3079 	dev_priv->wm.pri_latency[3] = 0;
3080 	dev_priv->wm.spr_latency[3] = 0;
3081 	dev_priv->wm.cur_latency[3] = 0;
3082 
3083 	drm_dbg_kms(&dev_priv->drm,
3084 		    "LP3 watermarks disabled due to potential for lost interrupts\n");
3085 	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3086 	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3087 	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3088 }
3089 
3090 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
3091 {
3092 	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
3093 
3094 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
3095 	       sizeof(dev_priv->wm.pri_latency));
3096 	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
3097 	       sizeof(dev_priv->wm.pri_latency));
3098 
3099 	intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
3100 	intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
3101 
3102 	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3103 	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3104 	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3105 
3106 	if (IS_GEN(dev_priv, 6)) {
3107 		snb_wm_latency_quirk(dev_priv);
3108 		snb_wm_lp3_irq_quirk(dev_priv);
3109 	}
3110 }
3111 
3112 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
3113 {
3114 	intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
3115 	intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
3116 }
3117 
3118 static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
3119 				 struct intel_pipe_wm *pipe_wm)
3120 {
3121 	/* LP0 watermark maximums depend on this pipe alone */
3122 	const struct intel_wm_config config = {
3123 		.num_pipes_active = 1,
3124 		.sprites_enabled = pipe_wm->sprites_enabled,
3125 		.sprites_scaled = pipe_wm->sprites_scaled,
3126 	};
3127 	struct ilk_wm_maximums max;
3128 
3129 	/* LP0 watermarks always use 1/2 DDB partitioning */
3130 	ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
3131 
3132 	/* At least LP0 must be valid */
3133 	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
3134 		drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
3135 		return false;
3136 	}
3137 
3138 	return true;
3139 }
3140 
3141 /* Compute new watermarks for the pipe */
3142 static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
3143 {
3144 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3145 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3146 	struct intel_pipe_wm *pipe_wm;
3147 	struct intel_plane *plane;
3148 	const struct intel_plane_state *plane_state;
3149 	const struct intel_plane_state *pristate = NULL;
3150 	const struct intel_plane_state *sprstate = NULL;
3151 	const struct intel_plane_state *curstate = NULL;
3152 	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
3153 	struct ilk_wm_maximums max;
3154 
3155 	pipe_wm = &crtc_state->wm.ilk.optimal;
3156 
3157 	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
3158 		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
3159 			pristate = plane_state;
3160 		else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
3161 			sprstate = plane_state;
3162 		else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
3163 			curstate = plane_state;
3164 	}
3165 
3166 	pipe_wm->pipe_enabled = crtc_state->hw.active;
3167 	if (sprstate) {
3168 		pipe_wm->sprites_enabled = sprstate->uapi.visible;
3169 		pipe_wm->sprites_scaled = sprstate->uapi.visible &&
3170 			(drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 ||
3171 			 drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16);
3172 	}
3173 
3174 	usable_level = max_level;
3175 
3176 	/* ILK/SNB: LP2+ watermarks only w/o sprites */
3177 	if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
3178 		usable_level = 1;
3179 
3180 	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
3181 	if (pipe_wm->sprites_scaled)
3182 		usable_level = 0;
3183 
3184 	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
3185 	ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
3186 			     pristate, sprstate, curstate, &pipe_wm->wm[0]);
3187 
3188 	if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
3189 		return -EINVAL;
3190 
3191 	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
3192 
3193 	for (level = 1; level <= usable_level; level++) {
3194 		struct intel_wm_level *wm = &pipe_wm->wm[level];
3195 
3196 		ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
3197 				     pristate, sprstate, curstate, wm);
3198 
3199 		/*
3200 		 * Disable any watermark level that exceeds the
3201 		 * register maximums since such watermarks are
3202 		 * always invalid.
3203 		 */
3204 		if (!ilk_validate_wm_level(level, &max, wm)) {
3205 			memset(wm, 0, sizeof(*wm));
3206 			break;
3207 		}
3208 	}
3209 
3210 	return 0;
3211 }
3212 
3213 /*
3214  * Build a set of 'intermediate' watermark values that satisfy both the old
3215  * state and the new state.  These can be programmed to the hardware
3216  * immediately.
3217  */
3218 static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
3219 {
3220 	struct intel_crtc *intel_crtc = to_intel_crtc(newstate->uapi.crtc);
3221 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3222 	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
3223 	struct intel_atomic_state *intel_state =
3224 		to_intel_atomic_state(newstate->uapi.state);
3225 	const struct intel_crtc_state *oldstate =
3226 		intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
3227 	const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
3228 	int level, max_level = ilk_wm_max_level(dev_priv);
3229 
3230 	/*
3231 	 * Start with the final, target watermarks, then combine with the
3232 	 * currently active watermarks to get values that are safe both before
3233 	 * and after the vblank.
3234 	 */
3235 	*a = newstate->wm.ilk.optimal;
3236 	if (!newstate->hw.active || drm_atomic_crtc_needs_modeset(&newstate->uapi) ||
3237 	    intel_state->skip_intermediate_wm)
3238 		return 0;
3239 
3240 	a->pipe_enabled |= b->pipe_enabled;
3241 	a->sprites_enabled |= b->sprites_enabled;
3242 	a->sprites_scaled |= b->sprites_scaled;
3243 
3244 	for (level = 0; level <= max_level; level++) {
3245 		struct intel_wm_level *a_wm = &a->wm[level];
3246 		const struct intel_wm_level *b_wm = &b->wm[level];
3247 
3248 		a_wm->enable &= b_wm->enable;
3249 		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
3250 		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
3251 		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
3252 		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
3253 	}
3254 
3255 	/*
3256 	 * We need to make sure that these merged watermark values are
3257 	 * actually a valid configuration themselves.  If they're not,
3258 	 * there's no safe way to transition from the old state to
3259 	 * the new state, so we need to fail the atomic transaction.
3260 	 */
3261 	if (!ilk_validate_pipe_wm(dev_priv, a))
3262 		return -EINVAL;
3263 
3264 	/*
3265 	 * If our intermediate WMs are identical to the final WMs, then we can
3266 	 * omit the post-vblank programming; only update if it's different.
3267 	 */
3268 	if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
3269 		newstate->wm.need_postvbl_update = true;
3270 
3271 	return 0;
3272 }
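
/*
 * Worked example (illustrative, with made-up values): if the old optimal
 * WM has pri_val = 60 and the new optimal WM has pri_val = 40, the
 * intermediate WM takes max(60, 40) = 60, which is safe both before and
 * after the vblank; the lower final value 40 is then programmed in the
 * post-vblank update.
 */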
3273 
3274 /*
3275  * Merge the watermarks from all active pipes for a specific level.
3276  */
3277 static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
3278 			       int level,
3279 			       struct intel_wm_level *ret_wm)
3280 {
3281 	const struct intel_crtc *intel_crtc;
3282 
3283 	ret_wm->enable = true;
3284 
3285 	for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
3286 		const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
3287 		const struct intel_wm_level *wm = &active->wm[level];
3288 
3289 		if (!active->pipe_enabled)
3290 			continue;
3291 
3292 		/*
3293 		 * The watermark values may have been used in the past,
3294 		 * so we must maintain them in the registers for some
3295 		 * time even if the level is now disabled.
3296 		 */
3297 		if (!wm->enable)
3298 			ret_wm->enable = false;
3299 
3300 		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
3301 		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
3302 		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
3303 		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
3304 	}
3305 }
3306 
3307 /*
3308  * Merge all low power watermarks for all active pipes.
3309  */
3310 static void ilk_wm_merge(struct drm_i915_private *dev_priv,
3311 			 const struct intel_wm_config *config,
3312 			 const struct ilk_wm_maximums *max,
3313 			 struct intel_pipe_wm *merged)
3314 {
3315 	int level, max_level = ilk_wm_max_level(dev_priv);
3316 	int last_enabled_level = max_level;
3317 
3318 	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
3319 	if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
3320 	    config->num_pipes_active > 1)
3321 		last_enabled_level = 0;
3322 
3323 	/* ILK: FBC WM must be disabled always */
3324 	merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
3325 
3326 	/* merge each WM1+ level */
3327 	for (level = 1; level <= max_level; level++) {
3328 		struct intel_wm_level *wm = &merged->wm[level];
3329 
3330 		ilk_merge_wm_level(dev_priv, level, wm);
3331 
3332 		if (level > last_enabled_level)
3333 			wm->enable = false;
3334 		else if (!ilk_validate_wm_level(level, max, wm))
3335 			/* make sure all following levels get disabled */
3336 			last_enabled_level = level - 1;
3337 
3338 		/*
3339 		 * The spec says it is preferred to disable
3340 		 * FBC WMs instead of disabling a WM level.
3341 		 */
3342 		if (wm->fbc_val > max->fbc) {
3343 			if (wm->enable)
3344 				merged->fbc_wm_enabled = false;
3345 			wm->fbc_val = 0;
3346 		}
3347 	}
3348 
3349 	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
3350 	/*
3351 	 * FIXME this is racy. FBC might get enabled later.
3352 	 * What we should check here is whether FBC can be
3353 	 * enabled sometime later.
3354 	 */
3355 	if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled &&
3356 	    intel_fbc_is_active(dev_priv)) {
3357 		for (level = 2; level <= max_level; level++) {
3358 			struct intel_wm_level *wm = &merged->wm[level];
3359 
3360 			wm->enable = false;
3361 		}
3362 	}
3363 }
3364 
3365 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
3366 {
3367 	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
3368 	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
3369 }
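
/*
 * Worked example (illustrative only): when pipe_wm->wm[4].enable is set
 * the three LP registers map to levels 1,3,4, otherwise to levels 1,2,3:
 *
 *   wm_lp = 1 -> 1 + 0 = level 1   (wm_lp < 2, either case)
 *   wm_lp = 2 -> 2 + 1 = level 3   (wm[4] enabled)
 *   wm_lp = 3 -> 3 + 1 = level 4   (wm[4] enabled)
 *   wm_lp = 2 -> 2 + 0 = level 2   (wm[4] disabled)
 *   wm_lp = 3 -> 3 + 0 = level 3   (wm[4] disabled)
 */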
3370 
3371 /* The value we need to program into the WM_LPx latency field */
3372 static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
3373 				      int level)
3374 {
3375 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3376 		return 2 * level;
3377 	else
3378 		return dev_priv->wm.pri_latency[level];
3379 }
3380 
3381 static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
3382 				   const struct intel_pipe_wm *merged,
3383 				   enum intel_ddb_partitioning partitioning,
3384 				   struct ilk_wm_values *results)
3385 {
3386 	struct intel_crtc *intel_crtc;
3387 	int level, wm_lp;
3388 
3389 	results->enable_fbc_wm = merged->fbc_wm_enabled;
3390 	results->partitioning = partitioning;
3391 
3392 	/* LP1+ register values */
3393 	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3394 		const struct intel_wm_level *r;
3395 
3396 		level = ilk_wm_lp_to_level(wm_lp, merged);
3397 
3398 		r = &merged->wm[level];
3399 
3400 		/*
3401 		 * Maintain the watermark values even if the level is
3402 		 * disabled. Doing otherwise could cause underruns.
3403 		 */
3404 		results->wm_lp[wm_lp - 1] =
3405 			(ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
3406 			(r->pri_val << WM1_LP_SR_SHIFT) |
3407 			r->cur_val;
3408 
3409 		if (r->enable)
3410 			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
3411 
3412 		if (INTEL_GEN(dev_priv) >= 8)
3413 			results->wm_lp[wm_lp - 1] |=
3414 				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
3415 		else
3416 			results->wm_lp[wm_lp - 1] |=
3417 				r->fbc_val << WM1_LP_FBC_SHIFT;
3418 
3419 		/*
3420 		 * Always set WM1S_LP_EN when spr_val != 0, even if the
3421 		 * level is disabled. Doing otherwise could cause underruns.
3422 		 */
3423 		if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
3424 			drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
3425 			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
3426 		} else
3427 			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
3428 	}
3429 
3430 	/* LP0 register values */
3431 	for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
3432 		enum pipe pipe = intel_crtc->pipe;
3433 		const struct intel_pipe_wm *pipe_wm = &intel_crtc->wm.active.ilk;
3434 		const struct intel_wm_level *r = &pipe_wm->wm[0];
3435 
3436 		if (drm_WARN_ON(&dev_priv->drm, !r->enable))
3437 			continue;
3438 
3439 		results->wm_pipe[pipe] =
3440 			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
3441 			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
3442 			r->cur_val;
3443 	}
3444 }
3445 
3446 /* Find the result with the highest level enabled. If both results have the
3447  * same highest level, prefer the one with fbc_wm_enabled; prefer r1 as the
3448  * tie-break. */
3448 static struct intel_pipe_wm *
3449 ilk_find_best_result(struct drm_i915_private *dev_priv,
3450 		     struct intel_pipe_wm *r1,
3451 		     struct intel_pipe_wm *r2)
3452 {
3453 	int level, max_level = ilk_wm_max_level(dev_priv);
3454 	int level1 = 0, level2 = 0;
3455 
3456 	for (level = 1; level <= max_level; level++) {
3457 		if (r1->wm[level].enable)
3458 			level1 = level;
3459 		if (r2->wm[level].enable)
3460 			level2 = level;
3461 	}
3462 
3463 	if (level1 == level2) {
3464 		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
3465 			return r2;
3466 		else
3467 			return r1;
3468 	} else if (level1 > level2) {
3469 		return r1;
3470 	} else {
3471 		return r2;
3472 	}
3473 }
3474 
3475 /* dirty bits used to track which watermarks need changes */
3476 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
3477 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
3478 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
3479 #define WM_DIRTY_FBC (1 << 24)
3480 #define WM_DIRTY_DDB (1 << 25)
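
/*
 * Resulting bit layout (illustrative): bits 0..2 hold the per-pipe WM0
 * dirty flags, bits 16..18 the LP1..LP3 flags, bit 24 FBC and bit 25 DDB:
 *
 *   WM_DIRTY_PIPE(PIPE_A) == (1 << 0)
 *   WM_DIRTY_LP(1)        == (1 << 16)
 *   WM_DIRTY_LP(3)        == (1 << 18)
 *   WM_DIRTY_FBC          == (1 << 24)
 *   WM_DIRTY_DDB          == (1 << 25)
 */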
3481 
3482 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
3483 					 const struct ilk_wm_values *old,
3484 					 const struct ilk_wm_values *new)
3485 {
3486 	unsigned int dirty = 0;
3487 	enum pipe pipe;
3488 	int wm_lp;
3489 
3490 	for_each_pipe(dev_priv, pipe) {
3491 		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
3492 			dirty |= WM_DIRTY_PIPE(pipe);
3493 			/* Must disable LP1+ watermarks too */
3494 			dirty |= WM_DIRTY_LP_ALL;
3495 		}
3496 	}
3497 
3498 	if (old->enable_fbc_wm != new->enable_fbc_wm) {
3499 		dirty |= WM_DIRTY_FBC;
3500 		/* Must disable LP1+ watermarks too */
3501 		dirty |= WM_DIRTY_LP_ALL;
3502 	}
3503 
3504 	if (old->partitioning != new->partitioning) {
3505 		dirty |= WM_DIRTY_DDB;
3506 		/* Must disable LP1+ watermarks too */
3507 		dirty |= WM_DIRTY_LP_ALL;
3508 	}
3509 
3510 	/* LP1+ watermarks already deemed dirty, no need to continue */
3511 	if (dirty & WM_DIRTY_LP_ALL)
3512 		return dirty;
3513 
3514 	/* Find the lowest numbered LP1+ watermark in need of an update... */
3515 	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3516 		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
3517 		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
3518 			break;
3519 	}
3520 
3521 	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
3522 	for (; wm_lp <= 3; wm_lp++)
3523 		dirty |= WM_DIRTY_LP(wm_lp);
3524 
3525 	return dirty;
3526 }
3527 
3528 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
3529 			       unsigned int dirty)
3530 {
3531 	struct ilk_wm_values *previous = &dev_priv->wm.hw;
3532 	bool changed = false;
3533 
3534 	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
3535 		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
3536 		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
3537 		changed = true;
3538 	}
3539 	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
3540 		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
3541 		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
3542 		changed = true;
3543 	}
3544 	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
3545 		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
3546 		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
3547 		changed = true;
3548 	}
3549 
3550 	/*
3551 	 * Don't touch WM1S_LP_EN here.
3552 	 * Doing so could cause underruns.
3553 	 */
3554 
3555 	return changed;
3556 }
3557 
3558 /*
3559  * The spec says we shouldn't write when we don't need to, because every
3560  * write causes WMs to be re-evaluated, expending some power.
3561  */
3562 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
3563 				struct ilk_wm_values *results)
3564 {
3565 	struct ilk_wm_values *previous = &dev_priv->wm.hw;
3566 	unsigned int dirty;
3567 	u32 val;
3568 
3569 	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
3570 	if (!dirty)
3571 		return;
3572 
3573 	_ilk_disable_lp_wm(dev_priv, dirty);
3574 
3575 	if (dirty & WM_DIRTY_PIPE(PIPE_A))
3576 		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
3577 	if (dirty & WM_DIRTY_PIPE(PIPE_B))
3578 		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
3579 	if (dirty & WM_DIRTY_PIPE(PIPE_C))
3580 		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
3581 
3582 	if (dirty & WM_DIRTY_DDB) {
3583 		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3584 			val = I915_READ(WM_MISC);
3585 			if (results->partitioning == INTEL_DDB_PART_1_2)
3586 				val &= ~WM_MISC_DATA_PARTITION_5_6;
3587 			else
3588 				val |= WM_MISC_DATA_PARTITION_5_6;
3589 			I915_WRITE(WM_MISC, val);
3590 		} else {
3591 			val = I915_READ(DISP_ARB_CTL2);
3592 			if (results->partitioning == INTEL_DDB_PART_1_2)
3593 				val &= ~DISP_DATA_PARTITION_5_6;
3594 			else
3595 				val |= DISP_DATA_PARTITION_5_6;
3596 			I915_WRITE(DISP_ARB_CTL2, val);
3597 		}
3598 	}
3599 
3600 	if (dirty & WM_DIRTY_FBC) {
3601 		val = I915_READ(DISP_ARB_CTL);
3602 		if (results->enable_fbc_wm)
3603 			val &= ~DISP_FBC_WM_DIS;
3604 		else
3605 			val |= DISP_FBC_WM_DIS;
3606 		I915_WRITE(DISP_ARB_CTL, val);
3607 	}
3608 
3609 	if (dirty & WM_DIRTY_LP(1) &&
3610 	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
3611 		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
3612 
3613 	if (INTEL_GEN(dev_priv) >= 7) {
3614 		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
3615 			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
3616 		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
3617 			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
3618 	}
3619 
3620 	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
3621 		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
3622 	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
3623 		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
3624 	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
3625 		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
3626 
3627 	dev_priv->wm.hw = *results;
3628 }
3629 
3630 bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
3631 {
3632 	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
3633 }
3634 
3635 u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
3636 {
3637 	int i;
3638 	int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
3639 	u8 enabled_slices_mask = 0;
3640 
3641 	for (i = 0; i < max_slices; i++) {
3642 		if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE)
3643 			enabled_slices_mask |= BIT(i);
3644 	}
3645 
3646 	return enabled_slices_mask;
3647 }
3648 
3649 /*
3650  * FIXME: We still don't have the proper code to detect whether we need to
3651  * so assume we'll always need it in order to avoid underruns.
3652  */
3653 static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
3654 {
3655 	return IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv);
3656 }
3657 
3658 static bool
3659 intel_has_sagv(struct drm_i915_private *dev_priv)
3660 {
3661 	return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
3662 		dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
3663 }
3664 
3665 static void
3666 skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
3667 {
3668 	if (INTEL_GEN(dev_priv) >= 12) {
3669 		u32 val = 0;
3670 		int ret;
3671 
3672 		ret = sandybridge_pcode_read(dev_priv,
3673 					     GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
3674 					     &val, NULL);
3675 		if (!ret) {
3676 			dev_priv->sagv_block_time_us = val;
3677 			return;
3678 		}
3679 
3680 		drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n");
3681 	} else if (IS_GEN(dev_priv, 11)) {
3682 		dev_priv->sagv_block_time_us = 10;
3683 		return;
3684 	} else if (IS_GEN(dev_priv, 10)) {
3685 		dev_priv->sagv_block_time_us = 20;
3686 		return;
3687 	} else if (IS_GEN(dev_priv, 9)) {
3688 		dev_priv->sagv_block_time_us = 30;
3689 		return;
3690 	} else {
3691 		MISSING_CASE(INTEL_GEN(dev_priv));
3692 	}
3693 
3694 	/* Default to an unusable block time */
3695 	dev_priv->sagv_block_time_us = -1;
3696 }
3697 
3698 /*
3699  * SAGV dynamically adjusts the system agent voltage and clock frequencies
3700  * depending on power and performance requirements. The display engine access
3701  * to system memory is blocked during the adjustment time. Because of the
3702  * blocking time, having this enabled can cause full system hangs and/or pipe
3703  * underruns if we don't meet all of the following requirements:
3704  *
3705  *  - <= 1 pipe enabled
3706  *  - All planes can enable watermarks for latencies >= SAGV engine block time
3707  *  - We're not using an interlaced display configuration
3708  */
3709 int
3710 intel_enable_sagv(struct drm_i915_private *dev_priv)
3711 {
3712 	int ret;
3713 
3714 	if (!intel_has_sagv(dev_priv))
3715 		return 0;
3716 
3717 	if (dev_priv->sagv_status == I915_SAGV_ENABLED)
3718 		return 0;
3719 
3720 	drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
3721 	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3722 				      GEN9_SAGV_ENABLE);
3723 
3724 	/* We don't need to wait for SAGV when enabling */
3725 
3726 	/*
3727 	 * Some skl systems, pre-release machines in particular,
3728 	 * don't actually have SAGV.
3729 	 */
3730 	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3731 		drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
3732 		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3733 		return 0;
3734 	} else if (ret < 0) {
3735 		drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
3736 		return ret;
3737 	}
3738 
3739 	dev_priv->sagv_status = I915_SAGV_ENABLED;
3740 	return 0;
3741 }
3742 
3743 int
3744 intel_disable_sagv(struct drm_i915_private *dev_priv)
3745 {
3746 	int ret;
3747 
3748 	if (!intel_has_sagv(dev_priv))
3749 		return 0;
3750 
3751 	if (dev_priv->sagv_status == I915_SAGV_DISABLED)
3752 		return 0;
3753 
3754 	drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
3755 	/* bspec says to keep retrying for at least 1 ms */
3756 	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3757 				GEN9_SAGV_DISABLE,
3758 				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
3759 				1);
3760 	/*
3761 	 * Some skl systems, pre-release machines in particular,
3762 	 * don't actually have SAGV.
3763 	 */
3764 	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3765 		drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
3766 		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3767 		return 0;
3768 	} else if (ret < 0) {
3769 		drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
3770 		return ret;
3771 	}
3772 
3773 	dev_priv->sagv_status = I915_SAGV_DISABLED;
3774 	return 0;
3775 }
3776 
3777 void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
3778 {
3779 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3780 	const struct intel_bw_state *new_bw_state;
3781 	const struct intel_bw_state *old_bw_state;
3782 	u32 new_mask = 0;
3783 
3784 	/*
3785 	 * Just return if we can't control SAGV or don't have it.
3786 	 * This is different from the case where we have SAGV but can't
3787 	 * afford it due to DBuf limitations: if SAGV is completely
3788 	 * disabled in the BIOS, we are not even allowed to send a PCode
3789 	 * request, as it would return an error. So we have to check here.
3790 	 */
3791 	if (!intel_has_sagv(dev_priv))
3792 		return;
3793 
3794 	new_bw_state = intel_atomic_get_new_bw_state(state);
3795 	if (!new_bw_state)
3796 		return;
3797 
3798 	if (INTEL_GEN(dev_priv) < 11 && !intel_can_enable_sagv(dev_priv, new_bw_state)) {
3799 		intel_disable_sagv(dev_priv);
3800 		return;
3801 	}
3802 
3803 	old_bw_state = intel_atomic_get_old_bw_state(state);
3804 	/*
3805 	 * Nothing to mask
3806 	 */
3807 	if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
3808 		return;
3809 
3810 	new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
3811 
3812 	/*
3813 	 * If the new mask is zero there is nothing to mask; we can
3814 	 * only unmask, which is done in the post-plane update.
3815 	 */
3816 	if (!new_mask)
3817 		return;
3818 
3819 	/*
3820 	 * Restrict required qgv points before updating the configuration.
3821 	 * According to BSpec we can't mask and unmask qgv points at the same
3822 	 * time. Also masking should be done before updating the configuration
3823 	 * and unmasking afterwards.
3824 	 */
3825 	icl_pcode_restrict_qgv_points(dev_priv, new_mask);
3826 }
3827 
3828 void intel_sagv_post_plane_update(struct intel_atomic_state *state)
3829 {
3830 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3831 	const struct intel_bw_state *new_bw_state;
3832 	const struct intel_bw_state *old_bw_state;
3833 	u32 new_mask = 0;
3834 
3835 	/*
3836 	 * Just return if we can't control SAGV or don't have it.
3837 	 * This is different from the case where we have SAGV but can't
3838 	 * afford it due to DBuf limitations: if SAGV is completely
3839 	 * disabled in the BIOS, we are not even allowed to send a PCode
3840 	 * request, as it would return an error. So we have to check here.
3841 	 */
3842 	if (!intel_has_sagv(dev_priv))
3843 		return;
3844 
3845 	new_bw_state = intel_atomic_get_new_bw_state(state);
3846 	if (!new_bw_state)
3847 		return;
3848 
3849 	if (INTEL_GEN(dev_priv) < 11 && intel_can_enable_sagv(dev_priv, new_bw_state)) {
3850 		intel_enable_sagv(dev_priv);
3851 		return;
3852 	}
3853 
3854 	old_bw_state = intel_atomic_get_old_bw_state(state);
3855 	/*
3856 	 * Nothing to unmask
3857 	 */
3858 	if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
3859 		return;
3860 
3861 	new_mask = new_bw_state->qgv_points_mask;
3862 
3863 	/*
3864 	 * Allow required qgv points after updating the configuration.
3865 	 * According to BSpec we can't mask and unmask qgv points at the same
3866 	 * time. Also masking should be done before updating the configuration
3867 	 * and unmasking afterwards.
3868 	 */
3869 	icl_pcode_restrict_qgv_points(dev_priv, new_mask);
3870 }
3871 
3872 static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
3873 {
3874 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3875 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3876 	struct intel_plane *plane;
3877 	const struct intel_plane_state *plane_state;
3878 	int level, latency;
3879 
3880 	if (!intel_has_sagv(dev_priv))
3881 		return false;
3882 
3883 	if (!crtc_state->hw.active)
3884 		return true;
3885 
3886 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3887 		return false;
3888 
3889 	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
3890 		const struct skl_plane_wm *wm =
3891 			&crtc_state->wm.skl.optimal.planes[plane->id];
3892 
3893 		/* Skip this plane if it's not enabled */
3894 		if (!wm->wm[0].plane_en)
3895 			continue;
3896 
3897 		/* Find the highest enabled wm level for this plane */
3898 		for (level = ilk_wm_max_level(dev_priv);
3899 		     !wm->wm[level].plane_en; --level)
3900 		     { }
3901 
3902 		latency = dev_priv->wm.skl_latency[level];
3903 
3904 		if (skl_needs_memory_bw_wa(dev_priv) &&
3905 		    plane_state->uapi.fb->modifier ==
3906 		    I915_FORMAT_MOD_X_TILED)
3907 			latency += 15;
3908 
3909 		/*
3910 		 * If any plane on this pipe doesn't enable a wm level that
3911 		 * covers memory latencies of at least sagv_block_time_us,
3912 		 * we can't enable SAGV.
3913 		 */
3914 		if (latency < dev_priv->sagv_block_time_us)
3915 			return false;
3916 	}
3917 
3918 	return true;
3919 }
3920 
3921 static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
3922 {
3923 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3924 	enum plane_id plane_id;
3925 
3926 	if (!crtc_state->hw.active)
3927 		return true;
3928 
3929 	for_each_plane_id_on_crtc(crtc, plane_id) {
3930 		const struct skl_ddb_entry *plane_alloc =
3931 			&crtc_state->wm.skl.plane_ddb_y[plane_id];
3932 		const struct skl_plane_wm *wm =
3933 			&crtc_state->wm.skl.optimal.planes[plane_id];
3934 
3935 		if (skl_ddb_entry_size(plane_alloc) < wm->sagv_wm0.min_ddb_alloc)
3936 			return false;
3937 	}
3938 
3939 	return true;
3940 }
3941 
3942 static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
3943 {
3944 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3945 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3946 
3947 	if (INTEL_GEN(dev_priv) >= 12)
3948 		return tgl_crtc_can_enable_sagv(crtc_state);
3949 	else
3950 		return skl_crtc_can_enable_sagv(crtc_state);
3951 }
3952 
3953 bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
3954 			   const struct intel_bw_state *bw_state)
3955 {
3956 	if (INTEL_GEN(dev_priv) < 11 &&
3957 	    bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
3958 		return false;
3959 
3960 	return bw_state->pipe_sagv_reject == 0;
3961 }
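
/*
 * Worked example (illustrative only): on pre-gen11 hardware with pipes A
 * and B active, active_pipes = 0b011, which is not a power of two, so
 * SAGV is rejected no matter what pipe_sagv_reject says; a single active
 * pipe (e.g. 0b001) passes this check and then only needs
 * pipe_sagv_reject == 0.
 */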
3962 
3963 static int intel_compute_sagv_mask(struct intel_atomic_state *state)
3964 {
3965 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3966 	int ret;
3967 	struct intel_crtc *crtc;
3968 	struct intel_crtc_state *new_crtc_state;
3969 	struct intel_bw_state *new_bw_state = NULL;
3970 	const struct intel_bw_state *old_bw_state = NULL;
3971 	int i;
3972 
3973 	for_each_new_intel_crtc_in_state(state, crtc,
3974 					 new_crtc_state, i) {
3975 		new_bw_state = intel_atomic_get_bw_state(state);
3976 		if (IS_ERR(new_bw_state))
3977 			return PTR_ERR(new_bw_state);
3978 
3979 		old_bw_state = intel_atomic_get_old_bw_state(state);
3980 
3981 		if (intel_crtc_can_enable_sagv(new_crtc_state))
3982 			new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
3983 		else
3984 			new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
3985 	}
3986 
3987 	if (!new_bw_state)
3988 		return 0;
3989 
3990 	new_bw_state->active_pipes =
3991 		intel_calc_active_pipes(state, old_bw_state->active_pipes);
3992 
3993 	if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
3994 		ret = intel_atomic_lock_global_state(&new_bw_state->base);
3995 		if (ret)
3996 			return ret;
3997 	}
3998 
3999 	if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
4000 	    intel_can_enable_sagv(dev_priv, old_bw_state)) {
4001 		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
4002 		if (ret)
4003 			return ret;
4004 	} else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
4005 		ret = intel_atomic_lock_global_state(&new_bw_state->base);
4006 		if (ret)
4007 			return ret;
4008 	}
4009 
4010 	for_each_new_intel_crtc_in_state(state, crtc,
4011 					 new_crtc_state, i) {
4012 		struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
4013 
4014 		/*
4015 		 * We store use_sagv_wm in the crtc state rather than relying on
4016 		 * the bw state since we have no convenient way to get at the
4017 		 * latter from the plane commit hooks (especially in the legacy
4018 		 * cursor case).
4019 		 */
4020 		pipe_wm->use_sagv_wm = INTEL_GEN(dev_priv) >= 12 &&
4021 				       intel_can_enable_sagv(dev_priv, new_bw_state);
4022 	}
4023 
4024 	return 0;
4025 }
4026 
4027 /*
4028  * Calculate the initial DBuf slice offset, based on slice size
4029  * and mask (e.g. if the slice size is 1024 and only the second
4030  * slice is enabled, the offset is 1024).
4031  */
4032 static unsigned int
4033 icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
4034 				u32 slice_size,
4035 				u32 ddb_size)
4036 {
4037 	unsigned int offset = 0;
4038 
4039 	if (!dbuf_slice_mask)
4040 		return 0;
4041 
4042 	offset = (ffs(dbuf_slice_mask) - 1) * slice_size;
4043 
4044 	WARN_ON(offset >= ddb_size);
4045 	return offset;
4046 }
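
/*
 * Worked example (illustrative only): with slice_size = 1024 and only
 * the second slice enabled, dbuf_slice_mask = 0b10, so
 * offset = (ffs(0b10) - 1) * 1024 = 1 * 1024 = 1024.
 */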
4047 
4048 u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
4049 {
4050 	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
4051 	drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
4052 
4053 	if (INTEL_GEN(dev_priv) < 11)
4054 		return ddb_size - 4; /* 4 blocks for bypass path allocation */
4055 
4056 	return ddb_size;
4057 }
4058 
4059 u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
4060 			    const struct skl_ddb_entry *entry)
4061 {
4062 	u32 slice_mask = 0;
4063 	u16 ddb_size = intel_get_ddb_size(dev_priv);
4064 	u16 num_supported_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
4065 	u16 slice_size = ddb_size / num_supported_slices;
4066 	u16 start_slice;
4067 	u16 end_slice;
4068 
4069 	if (!skl_ddb_entry_size(entry))
4070 		return 0;
4071 
4072 	start_slice = entry->start / slice_size;
4073 	end_slice = (entry->end - 1) / slice_size;
4074 
4075 	/*
4076 	 * A per-plane DDB entry can in the worst case span multiple slices,
4077 	 * but a single entry is always contiguous.
4078 	 */
4079 	while (start_slice <= end_slice) {
4080 		slice_mask |= BIT(start_slice);
4081 		start_slice++;
4082 	}
4083 
4084 	return slice_mask;
4085 }
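
/*
 * Illustrative sketch (compiled out, not part of the driver): the same
 * mask computation reduced to plain types, handy for checking an entry
 * by hand. E.g. with slice_size = 1024, the range [900, 1200) starts in
 * slice 900 / 1024 = 0 and ends in slice (1200 - 1) / 1024 = 1, giving
 * mask 0b11.
 */
#if 0
static unsigned int ddb_entry_slice_mask(unsigned int start,
					 unsigned int end,
					 unsigned int slice_size)
{
	unsigned int mask = 0;
	unsigned int slice;

	if (start == end)
		return 0;

	/* Mark every slice that the [start, end) range touches. */
	for (slice = start / slice_size;
	     slice <= (end - 1) / slice_size;
	     slice++)
		mask |= 1u << slice;

	return mask;
}
#endif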
4086 
4087 static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
4088 				  u8 active_pipes);
4089 
4090 static int
4091 skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
4092 				   const struct intel_crtc_state *crtc_state,
4093 				   const u64 total_data_rate,
4094 				   struct skl_ddb_entry *alloc, /* out */
4095 				   int *num_active /* out */)
4096 {
4097 	struct drm_atomic_state *state = crtc_state->uapi.state;
4098 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
4099 	struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
4100 	const struct intel_crtc *crtc;
4101 	u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
4102 	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
4103 	struct intel_dbuf_state *new_dbuf_state =
4104 		intel_atomic_get_new_dbuf_state(intel_state);
4105 	const struct intel_dbuf_state *old_dbuf_state =
4106 		intel_atomic_get_old_dbuf_state(intel_state);
4107 	u8 active_pipes = new_dbuf_state->active_pipes;
4108 	u16 ddb_size;
4109 	u32 ddb_range_size;
4110 	u32 i;
4111 	u32 dbuf_slice_mask;
4112 	u32 offset;
4113 	u32 slice_size;
4114 	u32 total_slice_mask;
4115 	u32 start, end;
4116 	int ret;
4117 
4118 	*num_active = hweight8(active_pipes);
4119 
4120 	if (!crtc_state->hw.active) {
4121 		alloc->start = 0;
4122 		alloc->end = 0;
4123 		return 0;
4124 	}
4125 
4126 	ddb_size = intel_get_ddb_size(dev_priv);
4127 
4128 	slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
4129 
4130 	/*
4131 	 * If the state doesn't change the set of active CRTCs or there is no
4132 	 * modeset request, then there's no need to recalculate;
4133 	 * the existing pipe allocation limits should remain unchanged.
4134 	 * Note that we're safe from racing commits since any racing commit
4135 	 * that changes the active CRTC list or does a modeset would need to
4136 	 * grab _all_ crtc locks, including the one we currently hold.
4137 	 */
4138 	if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes &&
4139 	    !dev_priv->wm.distrust_bios_wm) {
4140 		/*
4141 		 * alloc may be cleared by clear_intel_crtc_state,
4142 		 * copy from old state to be sure
4143 		 *
4144 		 * FIXME get rid of this mess
4145 		 */
4146 		*alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
4147 		return 0;
4148 	}
4149 
4150 	/*
4151 	 * Get the allowed DBuf slices for the corresponding pipe and platform.
4152 	 */
4153 	dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes);
4154 
4155 	/*
4156 	 * Figure out at which DBuf slice we start, e.g. if we start at DBuf S2
4157 	 * and the slice size is 1024, the offset is 1024.
4158 	 */
4159 	offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask,
4160 						 slice_size, ddb_size);
4161 
4162 	/*
4163 	 * Figure out the total size of the allowed DBuf slices, which is
4164 	 * the number of slices allowed for this pipe multiplied by the
4165 	 * slice size. Inside this range, DDB entries are still allocated
4166 	 * in proportion to display width.
4167 	 */
4168 	ddb_range_size = hweight8(dbuf_slice_mask) * slice_size;
4169 
4170 	/*
4171 	 * The watermark/DDB requirement depends heavily on the width of the
4172 	 * framebuffer, so instead of allocating DDB equally among pipes,
4173 	 * distribute it based on the resolution/width of the display.
4174 	 */
4175 	total_slice_mask = dbuf_slice_mask;
4176 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
4177 		const struct drm_display_mode *adjusted_mode =
4178 			&crtc_state->hw.adjusted_mode;
4179 		enum pipe pipe = crtc->pipe;
4180 		int hdisplay, vdisplay;
4181 		u32 pipe_dbuf_slice_mask;
4182 
4183 		if (!crtc_state->hw.active)
4184 			continue;
4185 
4186 		pipe_dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state,
4187 							       active_pipes);
4188 
4189 		/*
4190 		 * According to BSpec a pipe can share one DBuf slice with other
4191 		 * pipes, or a pipe can use multiple DBuf slices; in both cases
4192 		 * we account for other pipes only if they have exactly the same
4193 		 * mask. However, we still need to track how many slices should
4194 		 * be enabled in total.
4195 		 */
4196 		total_slice_mask |= pipe_dbuf_slice_mask;
4197 
4198 		/*
4199 		 * Do not account for pipes using other slice sets. Luckily,
4200 		 * as of the current BSpec, slice sets do not partially
4201 		 * intersect (pipes share either the same single slice or the
4202 		 * same slice set, i.e. no partial intersection), so checking
4203 		 * for equality is enough for now.
4204 		 */
4205 		if (dbuf_slice_mask != pipe_dbuf_slice_mask)
4206 			continue;
4207 
4208 		drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
4209 
4210 		total_width_in_range += hdisplay;
4211 
4212 		if (pipe < for_pipe)
4213 			width_before_pipe_in_range += hdisplay;
4214 		else if (pipe == for_pipe)
4215 			pipe_width = hdisplay;
4216 	}
4217 
4218 	/*
4219 	 * FIXME: For now we always enable slice S1 as per
4220 	 * the Bspec display initialization sequence.
4221 	 */
4222 	new_dbuf_state->enabled_slices = total_slice_mask | BIT(DBUF_S1);
4223 
4224 	if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
4225 		ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
4226 		if (ret)
4227 			return ret;
4228 	}
4229 
4230 	start = ddb_range_size * width_before_pipe_in_range / total_width_in_range;
4231 	end = ddb_range_size *
4232 		(width_before_pipe_in_range + pipe_width) / total_width_in_range;
4233 
4234 	alloc->start = offset + start;
4235 	alloc->end = offset + end;
4236 
4237 	drm_dbg_kms(&dev_priv->drm,
4238 		    "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n",
4239 		    for_crtc->base.id, for_crtc->name,
4240 		    dbuf_slice_mask, alloc->start, alloc->end, active_pipes);
4241 
4242 	return 0;
4243 }
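
/*
 * Worked example (illustrative, with made-up values): two active pipes
 * share a ddb_range_size of 2048 blocks, pipe A being 1920 pixels wide
 * and pipe B 1280 (total_width_in_range = 3200). For pipe B,
 * width_before_pipe_in_range = 1920, so:
 *
 *   start = 2048 * 1920 / 3200          = 1228
 *   end   = 2048 * (1920 + 1280) / 3200 = 2048
 *
 * i.e. pipe B is allotted blocks [1228, 2048) within the shared range,
 * in proportion to its display width.
 */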
4244 
4245 static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
4246 				 int width, const struct drm_format_info *format,
4247 				 u64 modifier, unsigned int rotation,
4248 				 u32 plane_pixel_rate, struct skl_wm_params *wp,
4249 				 int color_plane);
4250 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
4251 				 int level,
4252 				 unsigned int latency,
4253 				 const struct skl_wm_params *wp,
4254 				 const struct skl_wm_level *result_prev,
4255 				 struct skl_wm_level *result /* out */);
4256 
4257 static unsigned int
4258 skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
4259 		      int num_active)
4260 {
4261 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4262 	int level, max_level = ilk_wm_max_level(dev_priv);
4263 	struct skl_wm_level wm = {};
4264 	int ret, min_ddb_alloc = 0;
4265 	struct skl_wm_params wp;
4266 
4267 	ret = skl_compute_wm_params(crtc_state, 256,
4268 				    drm_format_info(DRM_FORMAT_ARGB8888),
4269 				    DRM_FORMAT_MOD_LINEAR,
4270 				    DRM_MODE_ROTATE_0,
4271 				    crtc_state->pixel_rate, &wp, 0);
4272 	drm_WARN_ON(&dev_priv->drm, ret);
4273 
4274 	for (level = 0; level <= max_level; level++) {
4275 		unsigned int latency = dev_priv->wm.skl_latency[level];
4276 
4277 		skl_compute_plane_wm(crtc_state, level, latency, &wp, &wm, &wm);
4278 		if (wm.min_ddb_alloc == U16_MAX)
4279 			break;
4280 
4281 		min_ddb_alloc = wm.min_ddb_alloc;
4282 	}
4283 
4284 	return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
4285 }
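
/*
 * Worked example (illustrative only): with a single active pipe the
 * cursor is guaranteed at least 32 blocks, so even if the last valid WM
 * level only needs min_ddb_alloc = 20 blocks the function returns
 * max(32, 20) = 32; with multiple pipes the floor drops to 8 blocks.
 */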
4286 
4287 static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
4288 				       struct skl_ddb_entry *entry, u32 reg)
4289 {
4290 
4291 	entry->start = reg & DDB_ENTRY_MASK;
4292 	entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK;
4293 
4294 	if (entry->end)
4295 		entry->end += 1;
4296 }
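
/*
 * Worked example (illustrative; assumes DDB_ENTRY_END_SHIFT == 16): a
 * register value of 0x03ff0200 decodes to start = 0x200 = 512 and
 * end = 0x3ff + 1 = 1024, i.e. the entry spans blocks [512, 1024).
 */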
4297 
4298 static void
4299 skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
4300 			   const enum pipe pipe,
4301 			   const enum plane_id plane_id,
4302 			   struct skl_ddb_entry *ddb_y,
4303 			   struct skl_ddb_entry *ddb_uv)
4304 {
4305 	u32 val, val2;
4306 	u32 fourcc = 0;
4307 
4308 	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
4309 	if (plane_id == PLANE_CURSOR) {
4310 		val = I915_READ(CUR_BUF_CFG(pipe));
4311 		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
4312 		return;
4313 	}
4314 
4315 	val = I915_READ(PLANE_CTL(pipe, plane_id));
4316 
4317 	/* No DDB allocated for disabled planes */
4318 	if (val & PLANE_CTL_ENABLE)
4319 		fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
4320 					      val & PLANE_CTL_ORDER_RGBX,
4321 					      val & PLANE_CTL_ALPHA_MASK);
4322 
4323 	if (INTEL_GEN(dev_priv) >= 11) {
4324 		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
4325 		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
4326 	} else {
4327 		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
4328 		val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
4329 
4330 		if (fourcc &&
4331 		    drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
4332 			swap(val, val2);
4333 
4334 		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
4335 		skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
4336 	}
4337 }
4338 
4339 void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
4340 			       struct skl_ddb_entry *ddb_y,
4341 			       struct skl_ddb_entry *ddb_uv)
4342 {
4343 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4344 	enum intel_display_power_domain power_domain;
4345 	enum pipe pipe = crtc->pipe;
4346 	intel_wakeref_t wakeref;
4347 	enum plane_id plane_id;
4348 
4349 	power_domain = POWER_DOMAIN_PIPE(pipe);
4350 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4351 	if (!wakeref)
4352 		return;
4353 
4354 	for_each_plane_id_on_crtc(crtc, plane_id)
4355 		skl_ddb_get_hw_plane_state(dev_priv, pipe,
4356 					   plane_id,
4357 					   &ddb_y[plane_id],
4358 					   &ddb_uv[plane_id]);
4359 
4360 	intel_display_power_put(dev_priv, power_domain, wakeref);
4361 }
4362 
4363 /*
4364  * Determines the downscale amount of a plane for the purposes of watermark calculations.
4365  * The bspec defines downscale amount as:
4366  *
4367  * """
4368  * Horizontal down scale amount = maximum[1, Horizontal source size /
4369  *                                           Horizontal destination size]
4370  * Vertical down scale amount = maximum[1, Vertical source size /
4371  *                                         Vertical destination size]
4372  * Total down scale amount = Horizontal down scale amount *
4373  *                           Vertical down scale amount
4374  * """
4375  *
4376  * Return value is provided in 16.16 fixed point form to retain fractional part.
4377  * Caller should take care of dividing & rounding off the value.
4378  */
4379 static uint_fixed_16_16_t
4380 skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
4381 			   const struct intel_plane_state *plane_state)
4382 {
4383 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4384 	u32 src_w, src_h, dst_w, dst_h;
4385 	uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
4386 	uint_fixed_16_16_t downscale_h, downscale_w;
4387 
4388 	if (drm_WARN_ON(&dev_priv->drm,
4389 			!intel_wm_plane_visible(crtc_state, plane_state)))
4390 		return u32_to_fixed16(0);
4391 
4392 	/*
4393 	 * Src coordinates are already rotated by 270 degrees for
4394 	 * the 90/270 degree plane rotation cases (to match the
4395 	 * GTT mapping), hence no need to account for rotation here.
4396 	 *
4397 	 * n.b., src is 16.16 fixed point, dst is whole integer.
4398 	 */
4399 	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
4400 	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
4401 	dst_w = drm_rect_width(&plane_state->uapi.dst);
4402 	dst_h = drm_rect_height(&plane_state->uapi.dst);
4403 
4404 	fp_w_ratio = div_fixed16(src_w, dst_w);
4405 	fp_h_ratio = div_fixed16(src_h, dst_h);
4406 	downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
4407 	downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
4408 
4409 	return mul_fixed16(downscale_w, downscale_h);
4410 }
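
/*
 * Worked example (illustrative only): a 3840x2160 source scaled into a
 * 1920x1080 destination gives fp_w_ratio = fp_h_ratio = 2.0; both are
 * already >= 1, so the total downscale amount is 2.0 * 2.0 = 4.0, i.e.
 * 4 << 16 = 0x40000 in 16.16 fixed point.
 */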
4411 
4412 struct dbuf_slice_conf_entry {
4413 	u8 active_pipes;
4414 	u8 dbuf_mask[I915_MAX_PIPES];
4415 };
4416 
4417 /*
4418  * Table taken from Bspec 12716
4419  * Pipes do have some preferred DBuf slice affinity,
4420  * plus there are some hardcoded requirements on how
4421  * those should be distributed for multipipe scenarios.
4422  * With more DBuf slices the algorithm would get even messier
4423  * and less readable, so we use the table almost as-is from
4424  * BSpec itself - that way it is at least easier to compare,
4425  * change and check.
4426  */
4427 static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
4428 /* Autogenerated with igt/tools/intel_dbuf_map tool: */
4429 {
4430 	{
4431 		.active_pipes = BIT(PIPE_A),
4432 		.dbuf_mask = {
4433 			[PIPE_A] = BIT(DBUF_S1),
4434 		},
4435 	},
4436 	{
4437 		.active_pipes = BIT(PIPE_B),
4438 		.dbuf_mask = {
4439 			[PIPE_B] = BIT(DBUF_S1),
4440 		},
4441 	},
4442 	{
4443 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
4444 		.dbuf_mask = {
4445 			[PIPE_A] = BIT(DBUF_S1),
4446 			[PIPE_B] = BIT(DBUF_S2),
4447 		},
4448 	},
4449 	{
4450 		.active_pipes = BIT(PIPE_C),
4451 		.dbuf_mask = {
4452 			[PIPE_C] = BIT(DBUF_S2),
4453 		},
4454 	},
4455 	{
4456 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
4457 		.dbuf_mask = {
4458 			[PIPE_A] = BIT(DBUF_S1),
4459 			[PIPE_C] = BIT(DBUF_S2),
4460 		},
4461 	},
4462 	{
4463 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
4464 		.dbuf_mask = {
4465 			[PIPE_B] = BIT(DBUF_S1),
4466 			[PIPE_C] = BIT(DBUF_S2),
4467 		},
4468 	},
4469 	{
4470 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
4471 		.dbuf_mask = {
4472 			[PIPE_A] = BIT(DBUF_S1),
4473 			[PIPE_B] = BIT(DBUF_S1),
4474 			[PIPE_C] = BIT(DBUF_S2),
4475 		},
4476 	},
4477 	{}
4478 };
4479 
4480 /*
4481  * Table taken from Bspec 49255
4482  * Pipes do have some preferred DBuf slice affinity,
4483  * plus there are some hardcoded requirements on how
4484  * those should be distributed for multipipe scenarios.
4485  * With more DBuf slices the algorithm would get even messier
4486  * and less readable, so we use the table almost as-is from
4487  * BSpec itself - that way it is at least easier to compare,
4488  * change and check.
4489  */
4490 static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
4491 /* Autogenerated with igt/tools/intel_dbuf_map tool: */
4492 {
4493 	{
4494 		.active_pipes = BIT(PIPE_A),
4495 		.dbuf_mask = {
4496 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4497 		},
4498 	},
4499 	{
4500 		.active_pipes = BIT(PIPE_B),
4501 		.dbuf_mask = {
4502 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
4503 		},
4504 	},
4505 	{
4506 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
4507 		.dbuf_mask = {
4508 			[PIPE_A] = BIT(DBUF_S2),
4509 			[PIPE_B] = BIT(DBUF_S1),
4510 		},
4511 	},
4512 	{
4513 		.active_pipes = BIT(PIPE_C),
4514 		.dbuf_mask = {
4515 			[PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
4516 		},
4517 	},
4518 	{
4519 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
4520 		.dbuf_mask = {
4521 			[PIPE_A] = BIT(DBUF_S1),
4522 			[PIPE_C] = BIT(DBUF_S2),
4523 		},
4524 	},
4525 	{
4526 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
4527 		.dbuf_mask = {
4528 			[PIPE_B] = BIT(DBUF_S1),
4529 			[PIPE_C] = BIT(DBUF_S2),
4530 		},
4531 	},
4532 	{
4533 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
4534 		.dbuf_mask = {
4535 			[PIPE_A] = BIT(DBUF_S1),
4536 			[PIPE_B] = BIT(DBUF_S1),
4537 			[PIPE_C] = BIT(DBUF_S2),
4538 		},
4539 	},
4540 	{
4541 		.active_pipes = BIT(PIPE_D),
4542 		.dbuf_mask = {
4543 			[PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
4544 		},
4545 	},
4546 	{
4547 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
4548 		.dbuf_mask = {
4549 			[PIPE_A] = BIT(DBUF_S1),
4550 			[PIPE_D] = BIT(DBUF_S2),
4551 		},
4552 	},
4553 	{
4554 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
4555 		.dbuf_mask = {
4556 			[PIPE_B] = BIT(DBUF_S1),
4557 			[PIPE_D] = BIT(DBUF_S2),
4558 		},
4559 	},
4560 	{
4561 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
4562 		.dbuf_mask = {
4563 			[PIPE_A] = BIT(DBUF_S1),
4564 			[PIPE_B] = BIT(DBUF_S1),
4565 			[PIPE_D] = BIT(DBUF_S2),
4566 		},
4567 	},
4568 	{
4569 		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
4570 		.dbuf_mask = {
4571 			[PIPE_C] = BIT(DBUF_S1),
4572 			[PIPE_D] = BIT(DBUF_S2),
4573 		},
4574 	},
4575 	{
4576 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
4577 		.dbuf_mask = {
4578 			[PIPE_A] = BIT(DBUF_S1),
4579 			[PIPE_C] = BIT(DBUF_S2),
4580 			[PIPE_D] = BIT(DBUF_S2),
4581 		},
4582 	},
4583 	{
4584 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
4585 		.dbuf_mask = {
4586 			[PIPE_B] = BIT(DBUF_S1),
4587 			[PIPE_C] = BIT(DBUF_S2),
4588 			[PIPE_D] = BIT(DBUF_S2),
4589 		},
4590 	},
4591 	{
4592 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
4593 		.dbuf_mask = {
4594 			[PIPE_A] = BIT(DBUF_S1),
4595 			[PIPE_B] = BIT(DBUF_S1),
4596 			[PIPE_C] = BIT(DBUF_S2),
4597 			[PIPE_D] = BIT(DBUF_S2),
4598 		},
4599 	},
4600 	{}
4601 };
4602 
4603 static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
4604 			      const struct dbuf_slice_conf_entry *dbuf_slices)
4605 {
4606 	int i;
4607 
4608 	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
4609 		if (dbuf_slices[i].active_pipes == active_pipes)
4610 			return dbuf_slices[i].dbuf_mask[pipe];
4611 	}
4612 	return 0;
4613 }
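
/*
 * Illustrative sketch (compiled out, not part of the driver): how a
 * lookup in the tables above resolves for the ICL two-pipe A+B case.
 */
#if 0
static u8 dbuf_lookup_example(void)
{
	/*
	 * active_pipes = BIT(PIPE_A) | BIT(PIPE_B) matches the entry
	 * { [PIPE_A] = BIT(DBUF_S1), [PIPE_B] = BIT(DBUF_S2) },
	 * so pipe A gets slice S1 and pipe B gets slice S2.
	 */
	return compute_dbuf_slices(PIPE_B, BIT(PIPE_A) | BIT(PIPE_B),
				   icl_allowed_dbufs); /* == BIT(DBUF_S2) */
}
#endif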
4614 
4615 /*
4616  * This function finds an entry with same enabled pipe configuration and
4617  * This function finds the entry with the same enabled pipe configuration
4618  * and returns the corresponding DBuf slice mask as stated in the BSpec
4619  * for the particular platform.
4620 static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
4621 {
4622 	/*
4623 	 * FIXME: For ICL this is still a bit unclear, as a previous BSpec
4624 	 * revision required calculating a "pipe ratio" in order to determine
4625 	 * whether one or two slices can be used for single pipe
4626 	 * configurations, as an additional constraint on the existing table.
4627 	 * However, based on recent info, it should not be "pipe ratio"
4628 	 * but rather the ratio between pixel_rate and cdclk with additional
4629 	 * constants, so for now we use only the table until this is
4630 	 * clarified. Once those additional constraints pop up, the
4631 	 * crtc_state will need to be passed in again here so that the
4632 	 * pixel_rate/cdclk ratio can be evaluated.
4633 	 */
4634 	return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs);
4635 }
4636 
4637 static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
4638 {
4639 	return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
4640 }
4641 
4642 static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
4643 				  u8 active_pipes)
4644 {
4645 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4646 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4647 	enum pipe pipe = crtc->pipe;
4648 
4649 	if (IS_GEN(dev_priv, 12))
4650 		return tgl_compute_dbuf_slices(pipe, active_pipes);
4651 	else if (IS_GEN(dev_priv, 11))
4652 		return icl_compute_dbuf_slices(pipe, active_pipes);
4653 	/*
4654 	 * For anything else just return one slice for now.
4655 	 * This should be extended for other platforms.
4656 	 */
4657 	return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
4658 }
4659 
4660 static u64
4661 skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
4662 			     const struct intel_plane_state *plane_state,
4663 			     int color_plane)
4664 {
4665 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
4666 	const struct drm_framebuffer *fb = plane_state->hw.fb;
4667 	u32 data_rate;
4668 	u32 width = 0, height = 0;
4669 	uint_fixed_16_16_t down_scale_amount;
4670 	u64 rate;
4671 
4672 	if (!plane_state->uapi.visible)
4673 		return 0;
4674 
4675 	if (plane->id == PLANE_CURSOR)
4676 		return 0;
4677 
4678 	if (color_plane == 1 &&
4679 	    !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
4680 		return 0;
4681 
4682 	/*
4683 	 * Src coordinates are already rotated by 270 degrees for
4684 	 * the 90/270 degree plane rotation cases (to match the
4685 	 * GTT mapping), hence no need to account for rotation here.
4686 	 */
4687 	width = drm_rect_width(&plane_state->uapi.src) >> 16;
4688 	height = drm_rect_height(&plane_state->uapi.src) >> 16;
4689 
4690 	/* UV plane does 1/2 pixel sub-sampling */
4691 	if (color_plane == 1) {
4692 		width /= 2;
4693 		height /= 2;
4694 	}
4695 
4696 	data_rate = width * height;
4697 
4698 	down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
4699 
4700 	rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
4701 
4702 	rate *= fb->format->cpp[color_plane];
4703 	return rate;
4704 }
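
/*
 * Worked example (illustrative only): a fully visible 1920x1080 XRGB8888
 * plane (cpp = 4) with no downscaling has data_rate = 1920 * 1080 =
 * 2073600 pixels, so the relative rate is 2073600 * 1.0 * 4 = 8294400.
 * For the UV plane of a planar format, width and height are halved
 * first, i.e. a quarter of the pixels.
 */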
4705 
4706 static u64
4707 skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
4708 				 u64 *plane_data_rate,
4709 				 u64 *uv_plane_data_rate)
4710 {
4711 	struct intel_plane *plane;
4712 	const struct intel_plane_state *plane_state;
4713 	u64 total_data_rate = 0;
4714 
4715 	/* Calculate and cache data rate for each plane */
4716 	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
4717 		enum plane_id plane_id = plane->id;
4718 		u64 rate;
4719 
4720 		/* packed/y */
4721 		rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
4722 		plane_data_rate[plane_id] = rate;
4723 		total_data_rate += rate;
4724 
4725 		/* uv-plane */
4726 		rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
4727 		uv_plane_data_rate[plane_id] = rate;
4728 		total_data_rate += rate;
4729 	}
4730 
4731 	return total_data_rate;
4732 }
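
/*
 * e.g. (assumed numbers) two visible planes with packed rates 1036800
 * and 518400 and zero UV rates give total_data_rate = 1555200, which
 * later drives the proportional DDB split in skl_allocate_pipe_ddb().
 */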
4733 
4734 static u64
4735 icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
4736 				 u64 *plane_data_rate)
4737 {
4738 	struct intel_plane *plane;
4739 	const struct intel_plane_state *plane_state;
4740 	u64 total_data_rate = 0;
4741 
4742 	/* Calculate and cache data rate for each plane */
4743 	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
4744 		enum plane_id plane_id = plane->id;
4745 		u64 rate;
4746 
4747 		if (!plane_state->planar_linked_plane) {
4748 			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
4749 			plane_data_rate[plane_id] = rate;
4750 			total_data_rate += rate;
4751 		} else {
4752 			enum plane_id y_plane_id;
4753 
4754 			/*
4755 			 * The slave plane might not be iterated by
4756 			 * intel_atomic_crtc_state_for_each_plane_state(),
4757 			 * and it needs the master plane state, which may be
4758 			 * NULL if we try get_new_plane_state(), so we
4759 			 * always calculate from the master.
4760 			 */
4761 			if (plane_state->planar_slave)
4762 				continue;
4763 
4764 			/* Y plane rate is calculated on the slave */
4765 			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
4766 			y_plane_id = plane_state->planar_linked_plane->id;
4767 			plane_data_rate[y_plane_id] = rate;
4768 			total_data_rate += rate;
4769 
4770 			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
4771 			plane_data_rate[plane_id] = rate;
4772 			total_data_rate += rate;
4773 		}
4774 	}
4775 
4776 	return total_data_rate;
4777 }
4778 
4779 static const struct skl_wm_level *
4780 skl_plane_wm_level(const struct intel_crtc_state *crtc_state,
4781 		   enum plane_id plane_id,
4782 		   int level)
4783 {
4784 	const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
4785 	const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
4786 
4787 	if (level == 0 && pipe_wm->use_sagv_wm)
4788 		return &wm->sagv_wm0;
4789 
4790 	return &wm->wm[level];
4791 }
4792 
4793 static int
4794 skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
4795 {
4796 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4797 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4798 	struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
4799 	u16 alloc_size, start = 0;
4800 	u16 total[I915_MAX_PLANES] = {};
4801 	u16 uv_total[I915_MAX_PLANES] = {};
4802 	u64 total_data_rate;
4803 	enum plane_id plane_id;
4804 	int num_active;
4805 	u64 plane_data_rate[I915_MAX_PLANES] = {};
4806 	u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
4807 	u32 blocks;
4808 	int level;
4809 	int ret;
4810 
4811 	/* Clear the partitioning for disabled planes. */
4812 	memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
4813 	memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
4814 
4815 	if (!crtc_state->hw.active) {
4816 		struct intel_atomic_state *state =
4817 			to_intel_atomic_state(crtc_state->uapi.state);
4818 		struct intel_dbuf_state *new_dbuf_state =
4819 			intel_atomic_get_new_dbuf_state(state);
4820 		const struct intel_dbuf_state *old_dbuf_state =
4821 			intel_atomic_get_old_dbuf_state(state);
4822 
4823 		/*
4824 		 * FIXME hack to make sure we compute this sensibly when
4825 		 * turning off all the pipes. Otherwise we leave it at
4826 		 * whatever we had previously, and then runtime PM will
4827 		 * mess it up by turning off all but S1. Remove this
4828 		 * once the dbuf state computation flow becomes sane.
4829 		 */
4830 		if (new_dbuf_state->active_pipes == 0) {
4831 			new_dbuf_state->enabled_slices = BIT(DBUF_S1);
4832 
4833 			if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
4834 				ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
4835 				if (ret)
4836 					return ret;
4837 			}
4838 		}
4839 
4840 		alloc->start = alloc->end = 0;
4841 		return 0;
4842 	}
4843 
4844 	if (INTEL_GEN(dev_priv) >= 11)
4845 		total_data_rate =
4846 			icl_get_total_relative_data_rate(crtc_state,
4847 							 plane_data_rate);
4848 	else
4849 		total_data_rate =
4850 			skl_get_total_relative_data_rate(crtc_state,
4851 							 plane_data_rate,
4852 							 uv_plane_data_rate);
4853 
4854 	ret = skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state,
4855 						 total_data_rate,
4856 						 alloc, &num_active);
4857 	if (ret)
4858 		return ret;
4859 
4860 	alloc_size = skl_ddb_entry_size(alloc);
4861 	if (alloc_size == 0)
4862 		return 0;
4863 
4864 	/* Allocate fixed number of blocks for cursor. */
4865 	total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
4866 	alloc_size -= total[PLANE_CURSOR];
4867 	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
4868 		alloc->end - total[PLANE_CURSOR];
4869 	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
4870 
4871 	if (total_data_rate == 0)
4872 		return 0;
4873 
4874 	/*
4875 	 * Find the highest watermark level for which we can satisfy the block
4876 	 * requirement of active planes.
4877 	 */
4878 	for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
4879 		blocks = 0;
4880 		for_each_plane_id_on_crtc(crtc, plane_id) {
4881 			const struct skl_plane_wm *wm =
4882 				&crtc_state->wm.skl.optimal.planes[plane_id];
4883 
4884 			if (plane_id == PLANE_CURSOR) {
4885 				if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
4886 					drm_WARN_ON(&dev_priv->drm,
4887 						    wm->wm[level].min_ddb_alloc != U16_MAX);
4888 					blocks = U32_MAX;
4889 					break;
4890 				}
4891 				continue;
4892 			}
4893 
4894 			blocks += wm->wm[level].min_ddb_alloc;
4895 			blocks += wm->uv_wm[level].min_ddb_alloc;
4896 		}
4897 
4898 		if (blocks <= alloc_size) {
4899 			alloc_size -= blocks;
4900 			break;
4901 		}
4902 	}
4903 
4904 	if (level < 0) {
4905 		drm_dbg_kms(&dev_priv->drm,
4906 			    "Requested display configuration exceeds system DDB limitations\n");
4907 		drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
4908 			    blocks, alloc_size);
4909 		return -EINVAL;
4910 	}
4911 
4912 	/*
4913 	 * Grant each plane the blocks it requires at the highest achievable
4914 	 * watermark level, plus an extra share of the leftover blocks
4915 	 * proportional to its relative data rate.
4916 	 */
4917 	for_each_plane_id_on_crtc(crtc, plane_id) {
4918 		const struct skl_plane_wm *wm =
4919 			&crtc_state->wm.skl.optimal.planes[plane_id];
4920 		u64 rate;
4921 		u16 extra;
4922 
4923 		if (plane_id == PLANE_CURSOR)
4924 			continue;
4925 
4926 		/*
4927 		 * We've accounted for all active planes; remaining planes are
4928 		 * all disabled.
4929 		 */
4930 		if (total_data_rate == 0)
4931 			break;
4932 
4933 		rate = plane_data_rate[plane_id];
4934 		extra = min_t(u16, alloc_size,
4935 			      DIV64_U64_ROUND_UP(alloc_size * rate,
4936 						 total_data_rate));
4937 		total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
4938 		alloc_size -= extra;
4939 		total_data_rate -= rate;
4940 
4941 		if (total_data_rate == 0)
4942 			break;
4943 
4944 		rate = uv_plane_data_rate[plane_id];
4945 		extra = min_t(u16, alloc_size,
4946 			      DIV64_U64_ROUND_UP(alloc_size * rate,
4947 						 total_data_rate));
4948 		uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
4949 		alloc_size -= extra;
4950 		total_data_rate -= rate;
4951 	}
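	/*
	 * Worked example with assumed numbers: if alloc_size = 100 leftover
	 * blocks remain and two planes still have data rates 300 and 100,
	 * the first gets extra = DIV64_U64_ROUND_UP(100 * 300, 400) = 75 and
	 * the second extra = DIV64_U64_ROUND_UP(25 * 100, 100) = 25, so the
	 * leftover is consumed exactly and the warning below stays silent.
	 */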
4952 	drm_WARN_ON(&dev_priv->drm, alloc_size != 0 || total_data_rate != 0);
4953 
4954 	/* Set the actual DDB start/end points for each plane */
4955 	start = alloc->start;
4956 	for_each_plane_id_on_crtc(crtc, plane_id) {
4957 		struct skl_ddb_entry *plane_alloc =
4958 			&crtc_state->wm.skl.plane_ddb_y[plane_id];
4959 		struct skl_ddb_entry *uv_plane_alloc =
4960 			&crtc_state->wm.skl.plane_ddb_uv[plane_id];
4961 
4962 		if (plane_id == PLANE_CURSOR)
4963 			continue;
4964 
4965 		/* Gen11+ uses a separate plane for UV watermarks */
4966 		drm_WARN_ON(&dev_priv->drm,
4967 			    INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
4968 
4969 		/* Leave disabled planes at (0,0) */
4970 		if (total[plane_id]) {
4971 			plane_alloc->start = start;
4972 			start += total[plane_id];
4973 			plane_alloc->end = start;
4974 		}
4975 
4976 		if (uv_total[plane_id]) {
4977 			uv_plane_alloc->start = start;
4978 			start += uv_total[plane_id];
4979 			uv_plane_alloc->end = start;
4980 		}
4981 	}
4982 
4983 	/*
4984 	 * When we calculated watermark values we didn't know how high
4985 	 * of a level we'd actually be able to hit, so we just marked
4986 	 * all levels as "enabled."  Go back now and disable the ones
4987 	 * that aren't actually possible.
4988 	 */
4989 	for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
4990 		for_each_plane_id_on_crtc(crtc, plane_id) {
4991 			struct skl_plane_wm *wm =
4992 				&crtc_state->wm.skl.optimal.planes[plane_id];
4993 
4994 			/*
4995 			 * We only disable the watermarks for each plane if
4996 			 * they exceed the ddb allocation of said plane. This
4997 			 * is done so that we don't end up touching cursor
4998 			 * watermarks needlessly when some other plane reduces
4999 			 * our max possible watermark level.
5000 			 *
5001 			 * Bspec has this to say about the PLANE_WM enable bit:
5002 			 * "All the watermarks at this level for all enabled
5003 			 *  planes must be enabled before the level will be used."
5004 			 * So this is actually safe to do.
5005 			 */
5006 			if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
5007 			    wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
5008 				memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
5009 
5010 			/*
5011 			 * Wa_1408961008:icl, ehl
5012 			 * Underruns with WM1+ disabled
5013 			 */
5014 			if (IS_GEN(dev_priv, 11) &&
5015 			    level == 1 && wm->wm[0].plane_en) {
5016 				wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
5017 				wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
5018 				wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
5019 			}
5020 		}
5021 	}
5022 
5023 	/*
5024 	 * Go back and disable the transition watermark if it turns out we
5025 	 * don't have enough DDB blocks for it.
5026 	 */
5027 	for_each_plane_id_on_crtc(crtc, plane_id) {
5028 		struct skl_plane_wm *wm =
5029 			&crtc_state->wm.skl.optimal.planes[plane_id];
5030 
5031 		if (wm->trans_wm.plane_res_b >= total[plane_id])
5032 			memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
5033 	}
5034 
5035 	return 0;
5036 }
5037 
5038 /*
5039  * The max latency should be 257 (max the punit can code is 255 and we add 2us
5040  * for the read latency) and cpp should always be <= 8, so that
5041  * should allow pixel_rate up to ~2 GHz which seems sufficient since max
5042  * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
5043  */
5044 static uint_fixed_16_16_t
5045 skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
5046 	       u8 cpp, u32 latency, u32 dbuf_block_size)
5047 {
5048 	u32 wm_intermediate_val;
5049 	uint_fixed_16_16_t ret;
5050 
5051 	if (latency == 0)
5052 		return FP_16_16_MAX;
5053 
5054 	wm_intermediate_val = latency * pixel_rate * cpp;
5055 	ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
5056 
5057 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
5058 		ret = add_fixed16_u32(ret, 1);
5059 
5060 	return ret;
5061 }
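
/*
 * Illustrative numbers (assumed, not from Bspec): with latency = 4 us,
 * a plane pixel rate of 148500 kHz, cpp = 4 and dbuf_block_size = 512,
 * method1 = 4 * 148500 * 4 / (1000 * 512) ~= 4.64 blocks, i.e. the
 * DDB blocks worth of data fetched during the latency window.
 */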
5062 
5063 static uint_fixed_16_16_t
5064 skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
5065 	       uint_fixed_16_16_t plane_blocks_per_line)
5066 {
5067 	u32 wm_intermediate_val;
5068 	uint_fixed_16_16_t ret;
5069 
5070 	if (latency == 0)
5071 		return FP_16_16_MAX;
5072 
5073 	wm_intermediate_val = latency * pixel_rate;
5074 	wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
5075 					   pipe_htotal * 1000);
5076 	ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
5077 	return ret;
5078 }
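
/*
 * Illustrative numbers (assumed): with latency = 30 us, pixel_rate =
 * 148500 kHz and pipe_htotal = 2200, the latency window spans
 * DIV_ROUND_UP(30 * 148500, 2200 * 1000) = 3 lines; multiplied by a
 * plane_blocks_per_line of 15.25 that yields ~45.75 blocks.
 */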
5079 
5080 static uint_fixed_16_16_t
5081 intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
5082 {
5083 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5084 	u32 pixel_rate;
5085 	u32 crtc_htotal;
5086 	uint_fixed_16_16_t linetime_us;
5087 
5088 	if (!crtc_state->hw.active)
5089 		return u32_to_fixed16(0);
5090 
5091 	pixel_rate = crtc_state->pixel_rate;
5092 
5093 	if (drm_WARN_ON(&dev_priv->drm, pixel_rate == 0))
5094 		return u32_to_fixed16(0);
5095 
5096 	crtc_htotal = crtc_state->hw.adjusted_mode.crtc_htotal;
5097 	linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
5098 
5099 	return linetime_us;
5100 }
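
/*
 * e.g. (assumed mode) crtc_htotal = 2200 and pixel_rate = 148500 kHz
 * give linetime_us = 2200 * 1000 / 148500 ~= 14.81 us per scanline.
 */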
5101 
5102 static u32
5103 skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
5104 			      const struct intel_plane_state *plane_state)
5105 {
5106 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5107 	u64 adjusted_pixel_rate;
5108 	uint_fixed_16_16_t downscale_amount;
5109 
5110 	/* Shouldn't reach here on disabled planes... */
5111 	if (drm_WARN_ON(&dev_priv->drm,
5112 			!intel_wm_plane_visible(crtc_state, plane_state)))
5113 		return 0;
5114 
5115 	/*
5116 	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
5117 	 * with additional adjustments for plane-specific scaling.
5118 	 */
5119 	adjusted_pixel_rate = crtc_state->pixel_rate;
5120 	downscale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
5121 
5122 	return mul_round_up_u32_fixed16(adjusted_pixel_rate,
5123 					    downscale_amount);
5124 }
5125 
5126 static int
5127 skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
5128 		      int width, const struct drm_format_info *format,
5129 		      u64 modifier, unsigned int rotation,
5130 		      u32 plane_pixel_rate, struct skl_wm_params *wp,
5131 		      int color_plane)
5132 {
5133 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5134 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5135 	u32 interm_pbpl;
5136 
5137 	/* only planar formats have two planes */
5138 	if (color_plane == 1 &&
5139 	    !intel_format_info_is_yuv_semiplanar(format, modifier)) {
5140 		drm_dbg_kms(&dev_priv->drm,
5141 			    "Non-planar formats have a single plane\n");
5142 		return -EINVAL;
5143 	}
5144 
5145 	wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
5146 		      modifier == I915_FORMAT_MOD_Yf_TILED ||
5147 		      modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
5148 		      modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
5149 		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
5150 		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
5151 	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
5152 	wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
5153 			 modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
5154 			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
5155 			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
5156 	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
5157 
5158 	wp->width = width;
5159 	if (color_plane == 1 && wp->is_planar)
5160 		wp->width /= 2;
5161 
5162 	wp->cpp = format->cpp[color_plane];
5163 	wp->plane_pixel_rate = plane_pixel_rate;
5164 
5165 	if (INTEL_GEN(dev_priv) >= 11 &&
5166 	    modifier == I915_FORMAT_MOD_Yf_TILED  && wp->cpp == 1)
5167 		wp->dbuf_block_size = 256;
5168 	else
5169 		wp->dbuf_block_size = 512;
5170 
5171 	if (drm_rotation_90_or_270(rotation)) {
5172 		switch (wp->cpp) {
5173 		case 1:
5174 			wp->y_min_scanlines = 16;
5175 			break;
5176 		case 2:
5177 			wp->y_min_scanlines = 8;
5178 			break;
5179 		case 4:
5180 			wp->y_min_scanlines = 4;
5181 			break;
5182 		default:
5183 			MISSING_CASE(wp->cpp);
5184 			return -EINVAL;
5185 		}
5186 	} else {
5187 		wp->y_min_scanlines = 4;
5188 	}
5189 
5190 	if (skl_needs_memory_bw_wa(dev_priv))
5191 		wp->y_min_scanlines *= 2;
5192 
5193 	wp->plane_bytes_per_line = wp->width * wp->cpp;
5194 	if (wp->y_tiled) {
5195 		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
5196 					   wp->y_min_scanlines,
5197 					   wp->dbuf_block_size);
5198 
5199 		if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
5200 			interm_pbpl++;
5201 
5202 		wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
5203 							wp->y_min_scanlines);
5204 	} else {
5205 		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
5206 					   wp->dbuf_block_size);
5207 
5208 		if (!wp->x_tiled ||
5209 		    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
5210 			interm_pbpl++;
5211 
5212 		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
5213 	}
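	/*
	 * Worked example (assumed values): width = 1920, cpp = 4 and
	 * dbuf_block_size = 512 give plane_bytes_per_line = 7680. For a
	 * Y-tiled surface with y_min_scanlines = 4 on gen10+ this is
	 * interm_pbpl = DIV_ROUND_UP(7680 * 4, 512) + 1 = 61, hence
	 * plane_blocks_per_line = 61 / 4 = 15.25 in fixed point.
	 */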
5214 
5215 	wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
5216 					     wp->plane_blocks_per_line);
5217 
5218 	wp->linetime_us = fixed16_to_u32_round_up(
5219 					intel_get_linetime_us(crtc_state));
5220 
5221 	return 0;
5222 }
5223 
5224 static int
5225 skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
5226 			    const struct intel_plane_state *plane_state,
5227 			    struct skl_wm_params *wp, int color_plane)
5228 {
5229 	const struct drm_framebuffer *fb = plane_state->hw.fb;
5230 	int width;
5231 
5232 	/*
5233 	 * Src coordinates are already rotated by 270 degrees for
5234 	 * the 90/270 degree plane rotation cases (to match the
5235 	 * GTT mapping), hence no need to account for rotation here.
5236 	 */
5237 	width = drm_rect_width(&plane_state->uapi.src) >> 16;
5238 
5239 	return skl_compute_wm_params(crtc_state, width,
5240 				     fb->format, fb->modifier,
5241 				     plane_state->hw.rotation,
5242 				     skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
5243 				     wp, color_plane);
5244 }
5245 
5246 static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
5247 {
5248 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
5249 		return true;
5250 
5251 	/* The number of lines is ignored for the level 0 watermark. */
5252 	return level > 0;
5253 }
5254 
5255 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
5256 				 int level,
5257 				 unsigned int latency,
5258 				 const struct skl_wm_params *wp,
5259 				 const struct skl_wm_level *result_prev,
5260 				 struct skl_wm_level *result /* out */)
5261 {
5262 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5263 	uint_fixed_16_16_t method1, method2;
5264 	uint_fixed_16_16_t selected_result;
5265 	u32 res_blocks, res_lines, min_ddb_alloc = 0;
5266 
5267 	if (latency == 0) {
5268 		/* reject it */
5269 		result->min_ddb_alloc = U16_MAX;
5270 		return;
5271 	}
5272 
5273 	/*
5274 	 * WaIncreaseLatencyIPCEnabled: kbl,cfl
5275 	 * Display WA #1141: kbl,cfl
5276 	 */
5277 	if ((IS_KABYLAKE(dev_priv) ||
5278 	     IS_COFFEELAKE(dev_priv) ||
5279 	     IS_COMETLAKE(dev_priv)) &&
5280 	    dev_priv->ipc_enabled)
5281 		latency += 4;
5282 
5283 	if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
5284 		latency += 15;
5285 
5286 	method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
5287 				 wp->cpp, latency, wp->dbuf_block_size);
5288 	method2 = skl_wm_method2(wp->plane_pixel_rate,
5289 				 crtc_state->hw.adjusted_mode.crtc_htotal,
5290 				 latency,
5291 				 wp->plane_blocks_per_line);
5292 
5293 	if (wp->y_tiled) {
5294 		selected_result = max_fixed16(method2, wp->y_tile_minimum);
5295 	} else {
5296 		if ((wp->cpp * crtc_state->hw.adjusted_mode.crtc_htotal /
5297 		     wp->dbuf_block_size < 1) &&
5298 		     (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
5299 			selected_result = method2;
5300 		} else if (latency >= wp->linetime_us) {
5301 			if (IS_GEN(dev_priv, 9) &&
5302 			    !IS_GEMINILAKE(dev_priv))
5303 				selected_result = min_fixed16(method1, method2);
5304 			else
5305 				selected_result = method2;
5306 		} else {
5307 			selected_result = method1;
5308 		}
5309 	}
5310 
5311 	res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
5312 	res_lines = div_round_up_fixed16(selected_result,
5313 					 wp->plane_blocks_per_line);
5314 
5315 	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
5316 		/* Display WA #1125: skl,bxt,kbl */
5317 		if (level == 0 && wp->rc_surface)
5318 			res_blocks +=
5319 				fixed16_to_u32_round_up(wp->y_tile_minimum);
5320 
5321 		/* Display WA #1126: skl,bxt,kbl */
5322 		if (level >= 1 && level <= 7) {
5323 			if (wp->y_tiled) {
5324 				res_blocks +=
5325 				    fixed16_to_u32_round_up(wp->y_tile_minimum);
5326 				res_lines += wp->y_min_scanlines;
5327 			} else {
5328 				res_blocks++;
5329 			}
5330 
5331 			/*
5332 			 * Make sure result blocks for higher latency levels are
5333 			 * at least as high as the level below the current level.
5334 			 * This is an assumption in the DDB algorithm optimization
5335 			 * for special cases. Also covers Display WA #1125 for RC.
5336 			 */
5337 			if (result_prev->plane_res_b > res_blocks)
5338 				res_blocks = result_prev->plane_res_b;
5339 		}
5340 	}
5341 
5342 	if (INTEL_GEN(dev_priv) >= 11) {
5343 		if (wp->y_tiled) {
5344 			int extra_lines;
5345 
5346 			if (res_lines % wp->y_min_scanlines == 0)
5347 				extra_lines = wp->y_min_scanlines;
5348 			else
5349 				extra_lines = wp->y_min_scanlines * 2 -
5350 					res_lines % wp->y_min_scanlines;
5351 
5352 			min_ddb_alloc = mul_round_up_u32_fixed16(res_lines + extra_lines,
5353 								 wp->plane_blocks_per_line);
5354 		} else {
5355 			min_ddb_alloc = res_blocks +
5356 				DIV_ROUND_UP(res_blocks, 10);
5357 		}
5358 	}
5359 
5360 	if (!skl_wm_has_lines(dev_priv, level))
5361 		res_lines = 0;
5362 
5363 	if (res_lines > 31) {
5364 		/* reject it */
5365 		result->min_ddb_alloc = U16_MAX;
5366 		return;
5367 	}
5368 
5369 	/*
5370 	 * If res_lines is valid, assume we can use this watermark level
5371 	 * for now.  We'll come back and disable it after we calculate the
5372 	 * DDB allocation if it turns out we don't actually have enough
5373 	 * blocks to satisfy it.
5374 	 */
5375 	result->plane_res_b = res_blocks;
5376 	result->plane_res_l = res_lines;
5377 	/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
5378 	result->min_ddb_alloc = max(min_ddb_alloc, res_blocks) + 1;
5379 	result->plane_en = true;
5380 }
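
/*
 * Illustrative gen11+ example (assumed numbers): a linear surface with
 * res_blocks = 40 gets min_ddb_alloc = 40 + DIV_ROUND_UP(40, 10) = 44,
 * and the value stored is max(44, 40) + 1 = 45 because Bspec treats
 * "value >= plane ddb allocation" as invalid.
 */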
5381 
5382 static void
5383 skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
5384 		      const struct skl_wm_params *wm_params,
5385 		      struct skl_wm_level *levels)
5386 {
5387 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5388 	int level, max_level = ilk_wm_max_level(dev_priv);
5389 	struct skl_wm_level *result_prev = &levels[0];
5390 
5391 	for (level = 0; level <= max_level; level++) {
5392 		struct skl_wm_level *result = &levels[level];
5393 		unsigned int latency = dev_priv->wm.skl_latency[level];
5394 
5395 		skl_compute_plane_wm(crtc_state, level, latency,
5396 				     wm_params, result_prev, result);
5397 
5398 		result_prev = result;
5399 	}
5400 }
5401 
5402 static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
5403 				const struct skl_wm_params *wm_params,
5404 				struct skl_plane_wm *plane_wm)
5405 {
5406 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5407 	struct skl_wm_level *sagv_wm = &plane_wm->sagv_wm0;
5408 	struct skl_wm_level *levels = plane_wm->wm;
5409 	unsigned int latency = dev_priv->wm.skl_latency[0] + dev_priv->sagv_block_time_us;
5410 
5411 	skl_compute_plane_wm(crtc_state, 0, latency,
5412 			     wm_params, &levels[0],
5413 			     sagv_wm);
5414 }
5415 
5416 static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
5417 				      const struct skl_wm_params *wp,
5418 				      struct skl_plane_wm *wm)
5419 {
5420 	struct drm_device *dev = crtc_state->uapi.crtc->dev;
5421 	const struct drm_i915_private *dev_priv = to_i915(dev);
5422 	u16 trans_min, trans_amount, trans_y_tile_min;
5423 	u16 wm0_sel_res_b, trans_offset_b, res_blocks;
5424 
5425 	/* Transition WMs don't make any sense if IPC is disabled */
5426 	if (!dev_priv->ipc_enabled)
5427 		return;
5428 
5429 	/*
5430 	 * WaDisableTWM:skl,kbl,cfl,bxt
5431 	 * Transition WMs are not recommended by the HW team for GEN9
5432 	 */
5433 	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
5434 		return;
5435 
5436 	if (INTEL_GEN(dev_priv) >= 11)
5437 		trans_min = 4;
5438 	else
5439 		trans_min = 14;
5440 
5441 	/* Display WA #1140: glk,cnl */
5442 	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
5443 		trans_amount = 0;
5444 	else
5445 		trans_amount = 10; /* This is configurable amount */
5446 
5447 	trans_offset_b = trans_min + trans_amount;
5448 
5449 	/*
5450 	 * The spec asks for Selected Result Blocks for wm0 (the real value),
5451 	 * not Result Blocks (the integer value). Pay attention to the capital
5452 	 * letters. The value wm_l0->plane_res_b is actually Result Blocks, but
5453 	 * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
5454 	 * and since we later will have to get the ceiling of the sum in the
5455 	 * transition watermarks calculation, we can just pretend Selected
5456 	 * Result Blocks is Result Blocks minus 1 and it should work for the
5457 	 * current platforms.
5458 	 */
5459 	wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
5460 
5461 	if (wp->y_tiled) {
5462 		trans_y_tile_min =
5463 			(u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
5464 		res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
5465 				trans_offset_b;
5466 	} else {
5467 		res_blocks = wm0_sel_res_b + trans_offset_b;
5468 	}
5469 
5470 	/*
5471 	 * Just assume we can enable the transition watermark.  After
5472 	 * computing the DDB we'll come back and disable it if that
5473 	 * assumption turns out to be false.
5474 	 */
5475 	wm->trans_wm.plane_res_b = res_blocks + 1;
5476 	wm->trans_wm.plane_en = true;
5477 }
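
/*
 * Worked example (assumed values): on gen11 with IPC enabled,
 * trans_min = 4 and trans_amount = 10 give trans_offset_b = 14. For a
 * linear surface with wm[0].plane_res_b = 31, wm0_sel_res_b = 30 and
 * res_blocks = 30 + 14 = 44, so trans_wm.plane_res_b is set to 45.
 */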
5478 
5479 static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
5480 				     const struct intel_plane_state *plane_state,
5481 				     enum plane_id plane_id, int color_plane)
5482 {
5483 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5484 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5485 	struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
5486 	struct skl_wm_params wm_params;
5487 	int ret;
5488 
5489 	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
5490 					  &wm_params, color_plane);
5491 	if (ret)
5492 		return ret;
5493 
5494 	skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
5495 
5496 	if (INTEL_GEN(dev_priv) >= 12)
5497 		tgl_compute_sagv_wm(crtc_state, &wm_params, wm);
5498 
5499 	skl_compute_transition_wm(crtc_state, &wm_params, wm);
5500 
5501 	return 0;
5502 }
5503 
5504 static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
5505 				 const struct intel_plane_state *plane_state,
5506 				 enum plane_id plane_id)
5507 {
5508 	struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
5509 	struct skl_wm_params wm_params;
5510 	int ret;
5511 
5512 	wm->is_planar = true;
5513 
5514 	/* uv plane watermarks must also be validated for NV12/Planar */
5515 	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
5516 					  &wm_params, 1);
5517 	if (ret)
5518 		return ret;
5519 
5520 	skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);
5521 
5522 	return 0;
5523 }
5524 
5525 static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
5526 			      const struct intel_plane_state *plane_state)
5527 {
5528 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
5529 	const struct drm_framebuffer *fb = plane_state->hw.fb;
5530 	enum plane_id plane_id = plane->id;
5531 	int ret;
5532 
5533 	if (!intel_wm_plane_visible(crtc_state, plane_state))
5534 		return 0;
5535 
5536 	ret = skl_build_plane_wm_single(crtc_state, plane_state,
5537 					plane_id, 0);
5538 	if (ret)
5539 		return ret;
5540 
5541 	if (fb->format->is_yuv && fb->format->num_planes > 1) {
5542 		ret = skl_build_plane_wm_uv(crtc_state, plane_state,
5543 					    plane_id);
5544 		if (ret)
5545 			return ret;
5546 	}
5547 
5548 	return 0;
5549 }
5550 
5551 static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
5552 			      const struct intel_plane_state *plane_state)
5553 {
5554 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5555 	enum plane_id plane_id = to_intel_plane(plane_state->uapi.plane)->id;
5556 	int ret;
5557 
5558 	/* Watermarks calculated in master */
5559 	if (plane_state->planar_slave)
5560 		return 0;
5561 
5562 	if (plane_state->planar_linked_plane) {
5563 		const struct drm_framebuffer *fb = plane_state->hw.fb;
5564 		enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
5565 
5566 		drm_WARN_ON(&dev_priv->drm,
5567 			    !intel_wm_plane_visible(crtc_state, plane_state));
5568 		drm_WARN_ON(&dev_priv->drm, !fb->format->is_yuv ||
5569 			    fb->format->num_planes == 1);
5570 
5571 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
5572 						y_plane_id, 0);
5573 		if (ret)
5574 			return ret;
5575 
5576 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
5577 						plane_id, 1);
5578 		if (ret)
5579 			return ret;
5580 	} else if (intel_wm_plane_visible(crtc_state, plane_state)) {
5581 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
5582 						plane_id, 0);
5583 		if (ret)
5584 			return ret;
5585 	}
5586 
5587 	return 0;
5588 }
5589 
5590 static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
5591 {
5592 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5593 	struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
5594 	struct intel_plane *plane;
5595 	const struct intel_plane_state *plane_state;
5596 	int ret;
5597 
5598 	/*
5599 	 * We'll only calculate watermarks for planes that are actually
5600 	 * enabled, so make sure all other planes are set as disabled.
5601 	 */
5602 	memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
5603 
5604 	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state,
5605 						     crtc_state) {
5606 
5607 		if (INTEL_GEN(dev_priv) >= 11)
5608 			ret = icl_build_plane_wm(crtc_state, plane_state);
5609 		else
5610 			ret = skl_build_plane_wm(crtc_state, plane_state);
5611 		if (ret)
5612 			return ret;
5613 	}
5614 
5615 	return 0;
5616 }
5617 
5618 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
5619 				i915_reg_t reg,
5620 				const struct skl_ddb_entry *entry)
5621 {
5622 	if (entry->end)
5623 		intel_de_write_fw(dev_priv, reg,
5624 				  (entry->end - 1) << 16 | entry->start);
5625 	else
5626 		intel_de_write_fw(dev_priv, reg, 0);
5627 }
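
/*
 * e.g. (assumed) an entry of {start = 0, end = 160} is written as
 * (159 << 16) | 0, since the register encodes an inclusive end block.
 */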
5628 
5629 static void skl_write_wm_level(struct drm_i915_private *dev_priv,
5630 			       i915_reg_t reg,
5631 			       const struct skl_wm_level *level)
5632 {
5633 	u32 val = 0;
5634 
5635 	if (level->plane_en)
5636 		val |= PLANE_WM_EN;
5637 	if (level->ignore_lines)
5638 		val |= PLANE_WM_IGNORE_LINES;
5639 	val |= level->plane_res_b;
5640 	val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
5641 
5642 	intel_de_write_fw(dev_priv, reg, val);
5643 }
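
/*
 * Illustrative encoding (assumed values): an enabled level with
 * plane_res_b = 45 and plane_res_l = 2 is written as
 * PLANE_WM_EN | (2 << PLANE_WM_LINES_SHIFT) | 45.
 */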
5644 
5645 void skl_write_plane_wm(struct intel_plane *plane,
5646 			const struct intel_crtc_state *crtc_state)
5647 {
5648 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5649 	int level, max_level = ilk_wm_max_level(dev_priv);
5650 	enum plane_id plane_id = plane->id;
5651 	enum pipe pipe = plane->pipe;
5652 	const struct skl_plane_wm *wm =
5653 		&crtc_state->wm.skl.optimal.planes[plane_id];
5654 	const struct skl_ddb_entry *ddb_y =
5655 		&crtc_state->wm.skl.plane_ddb_y[plane_id];
5656 	const struct skl_ddb_entry *ddb_uv =
5657 		&crtc_state->wm.skl.plane_ddb_uv[plane_id];
5658 
5659 	for (level = 0; level <= max_level; level++) {
5660 		const struct skl_wm_level *wm_level;
5661 
5662 		wm_level = skl_plane_wm_level(crtc_state, plane_id, level);
5663 
5664 		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
5665 				   wm_level);
5666 	}
5667 	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
5668 			   &wm->trans_wm);
5669 
5670 	if (INTEL_GEN(dev_priv) >= 11) {
5671 		skl_ddb_entry_write(dev_priv,
5672 				    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
5673 		return;
5674 	}
5675 
5676 	if (wm->is_planar)
5677 		swap(ddb_y, ddb_uv);
5678 
5679 	skl_ddb_entry_write(dev_priv,
5680 			    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
5681 	skl_ddb_entry_write(dev_priv,
5682 			    PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
5683 }
5684 
5685 void skl_write_cursor_wm(struct intel_plane *plane,
5686 			 const struct intel_crtc_state *crtc_state)
5687 {
5688 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5689 	int level, max_level = ilk_wm_max_level(dev_priv);
5690 	enum plane_id plane_id = plane->id;
5691 	enum pipe pipe = plane->pipe;
5692 	const struct skl_plane_wm *wm =
5693 		&crtc_state->wm.skl.optimal.planes[plane_id];
5694 	const struct skl_ddb_entry *ddb =
5695 		&crtc_state->wm.skl.plane_ddb_y[plane_id];
5696 
5697 	for (level = 0; level <= max_level; level++) {
5698 		const struct skl_wm_level *wm_level;
5699 
5700 		wm_level = skl_plane_wm_level(crtc_state, plane_id, level);
5701 
5702 		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
5703 				   wm_level);
5704 	}
5705 	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
5706 
5707 	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
5708 }
5709 
5710 bool skl_wm_level_equals(const struct skl_wm_level *l1,
5711 			 const struct skl_wm_level *l2)
5712 {
5713 	return l1->plane_en == l2->plane_en &&
5714 		l1->ignore_lines == l2->ignore_lines &&
5715 		l1->plane_res_l == l2->plane_res_l &&
5716 		l1->plane_res_b == l2->plane_res_b;
5717 }
5718 
5719 static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
5720 				const struct skl_plane_wm *wm1,
5721 				const struct skl_plane_wm *wm2)
5722 {
5723 	int level, max_level = ilk_wm_max_level(dev_priv);
5724 
5725 	for (level = 0; level <= max_level; level++) {
5726 		/*
5727 		 * We don't check uv_wm as the hardware doesn't actually
5728 		 * use it. It only gets used for calculating the required
5729 		 * ddb allocation.
5730 		 */
5731 		if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
5732 			return false;
5733 	}
5734 
5735 	return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
5736 }
5737 
5738 static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
5739 				    const struct skl_ddb_entry *b)
5740 {
5741 	return a->start < b->end && b->start < a->end;
5742 }
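
/*
 * The entries behave as half-open [start, end) ranges: e.g. (assumed)
 * {0, 160} and {160, 320} touch at block 160 but do not overlap,
 * whereas {0, 160} and {100, 200} do.
 */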
5743 
5744 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
5745 				 const struct skl_ddb_entry *entries,
5746 				 int num_entries, int ignore_idx)
5747 {
5748 	int i;
5749 
5750 	for (i = 0; i < num_entries; i++) {
5751 		if (i != ignore_idx &&
5752 		    skl_ddb_entries_overlap(ddb, &entries[i]))
5753 			return true;
5754 	}
5755 
5756 	return false;
5757 }
5758 
5759 static int
5760 skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
5761 			    struct intel_crtc_state *new_crtc_state)
5762 {
5763 	struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
5764 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5765 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5766 	struct intel_plane *plane;
5767 
5768 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5769 		struct intel_plane_state *plane_state;
5770 		enum plane_id plane_id = plane->id;
5771 
5772 		if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
5773 					&new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
5774 		    skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
5775 					&new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
5776 			continue;
5777 
5778 		plane_state = intel_atomic_get_plane_state(state, plane);
5779 		if (IS_ERR(plane_state))
5780 			return PTR_ERR(plane_state);
5781 
5782 		new_crtc_state->update_planes |= BIT(plane_id);
5783 	}
5784 
5785 	return 0;
5786 }
5787 
5788 static int
5789 skl_compute_ddb(struct intel_atomic_state *state)
5790 {
5791 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5792 	const struct intel_dbuf_state *old_dbuf_state;
5793 	const struct intel_dbuf_state *new_dbuf_state;
5794 	const struct intel_crtc_state *old_crtc_state;
5795 	struct intel_crtc_state *new_crtc_state;
5796 	struct intel_crtc *crtc;
5797 	int ret, i;
5798 
5799 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
5800 					    new_crtc_state, i) {
5801 		ret = skl_allocate_pipe_ddb(new_crtc_state);
5802 		if (ret)
5803 			return ret;
5804 
5805 		ret = skl_ddb_add_affected_planes(old_crtc_state,
5806 						  new_crtc_state);
5807 		if (ret)
5808 			return ret;
5809 	}
5810 
5811 	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
5812 	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
5813 
5814 	if (new_dbuf_state &&
5815 	    new_dbuf_state->enabled_slices != old_dbuf_state->enabled_slices)
5816 		drm_dbg_kms(&dev_priv->drm,
5817 			    "Enabled dbuf slices 0x%x -> 0x%x (out of %d dbuf slices)\n",
5818 			    old_dbuf_state->enabled_slices,
5819 			    new_dbuf_state->enabled_slices,
5820 			    INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
5821 
5822 	return 0;
5823 }
5824 
5825 static char enast(bool enable)
5826 {
5827 	return enable ? '*' : ' ';
5828 }
5829 
5830 static void
5831 skl_print_wm_changes(struct intel_atomic_state *state)
5832 {
5833 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5834 	const struct intel_crtc_state *old_crtc_state;
5835 	const struct intel_crtc_state *new_crtc_state;
5836 	struct intel_plane *plane;
5837 	struct intel_crtc *crtc;
5838 	int i;
5839 
5840 	if (!drm_debug_enabled(DRM_UT_KMS))
5841 		return;
5842 
5843 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
5844 					    new_crtc_state, i) {
5845 		const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
5846 
5847 		old_pipe_wm = &old_crtc_state->wm.skl.optimal;
5848 		new_pipe_wm = &new_crtc_state->wm.skl.optimal;
5849 
5850 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5851 			enum plane_id plane_id = plane->id;
5852 			const struct skl_ddb_entry *old, *new;
5853 
5854 			old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
5855 			new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
5856 
5857 			if (skl_ddb_entry_equal(old, new))
5858 				continue;
5859 
5860 			drm_dbg_kms(&dev_priv->drm,
5861 				    "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
5862 				    plane->base.base.id, plane->base.name,
5863 				    old->start, old->end, new->start, new->end,
5864 				    skl_ddb_entry_size(old), skl_ddb_entry_size(new));
5865 		}
5866 
5867 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5868 			enum plane_id plane_id = plane->id;
5869 			const struct skl_plane_wm *old_wm, *new_wm;
5870 
5871 			old_wm = &old_pipe_wm->planes[plane_id];
5872 			new_wm = &new_pipe_wm->planes[plane_id];
5873 
5874 			if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
5875 				continue;
5876 
5877 			drm_dbg_kms(&dev_priv->drm,
5878 				    "[PLANE:%d:%s]   level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm"
5879 				    " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm\n",
5880 				    plane->base.base.id, plane->base.name,
5881 				    enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
5882 				    enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
5883 				    enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
5884 				    enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
5885 				    enast(old_wm->trans_wm.plane_en),
5886 				    enast(old_wm->sagv_wm0.plane_en),
5887 				    enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
5888 				    enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
5889 				    enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
5890 				    enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
5891 				    enast(new_wm->trans_wm.plane_en),
5892 				    enast(new_wm->sagv_wm0.plane_en));
5893 
5894 			drm_dbg_kms(&dev_priv->drm,
5895 				    "[PLANE:%d:%s]   lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
5896 				      " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
5897 				    plane->base.base.id, plane->base.name,
5898 				    enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
5899 				    enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
5900 				    enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
5901 				    enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
5902 				    enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
5903 				    enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
5904 				    enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
5905 				    enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
5906 				    enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
5907 				    enast(old_wm->sagv_wm0.ignore_lines), old_wm->sagv_wm0.plane_res_l,
5908 
5909 				    enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
5910 				    enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
5911 				    enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
5912 				    enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
5913 				    enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
5914 				    enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
5915 				    enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
5916 				    enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
5917 				    enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l,
5918 				    enast(new_wm->sagv_wm0.ignore_lines), new_wm->sagv_wm0.plane_res_l);
5919 
5920 			drm_dbg_kms(&dev_priv->drm,
5921 				    "[PLANE:%d:%s]  blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
5922 				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
5923 				    plane->base.base.id, plane->base.name,
5924 				    old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
5925 				    old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
5926 				    old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
5927 				    old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
5928 				    old_wm->trans_wm.plane_res_b,
5929 				    old_wm->sagv_wm0.plane_res_b,
5930 				    new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
5931 				    new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
5932 				    new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
5933 				    new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
5934 				    new_wm->trans_wm.plane_res_b,
5935 				    new_wm->sagv_wm0.plane_res_b);
5936 
5937 			drm_dbg_kms(&dev_priv->drm,
5938 				    "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
5939 				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
5940 				    plane->base.base.id, plane->base.name,
5941 				    old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
5942 				    old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
5943 				    old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
5944 				    old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
5945 				    old_wm->trans_wm.min_ddb_alloc,
5946 				    old_wm->sagv_wm0.min_ddb_alloc,
5947 				    new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
5948 				    new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
5949 				    new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
5950 				    new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
5951 				    new_wm->trans_wm.min_ddb_alloc,
5952 				    new_wm->sagv_wm0.min_ddb_alloc);
5953 		}
5954 	}
5955 }
5956 
5957 static int intel_add_affected_pipes(struct intel_atomic_state *state,
5958 				    u8 pipe_mask)
5959 {
5960 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5961 	struct intel_crtc *crtc;
5962 
5963 	for_each_intel_crtc(&dev_priv->drm, crtc) {
5964 		struct intel_crtc_state *crtc_state;
5965 
5966 		if ((pipe_mask & BIT(crtc->pipe)) == 0)
5967 			continue;
5968 
5969 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5970 		if (IS_ERR(crtc_state))
5971 			return PTR_ERR(crtc_state);
5972 	}
5973 
5974 	return 0;
5975 }
5976 
5977 static int
5978 skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
5979 {
5980 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5981 	struct intel_crtc_state *crtc_state;
5982 	struct intel_crtc *crtc;
5983 	int i, ret;
5984 
5985 	if (dev_priv->wm.distrust_bios_wm) {
5986 		/*
5987 		 * skl_ddb_get_pipe_allocation_limits() currently requires
5988 		 * all active pipes to be included in the state so that
5989 		 * it can redistribute the dbuf among them, and it really
5990 		 * wants to recompute things when distrust_bios_wm is set
5991 		 * so we add all the pipes to the state.
5992 		 */
5993 		ret = intel_add_affected_pipes(state, ~0);
5994 		if (ret)
5995 			return ret;
5996 	}
5997 
5998 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
5999 		struct intel_dbuf_state *new_dbuf_state;
6000 		const struct intel_dbuf_state *old_dbuf_state;
6001 
6002 		new_dbuf_state = intel_atomic_get_dbuf_state(state);
6003 		if (IS_ERR(new_dbuf_state))
6004 			return PTR_ERR(new_dbuf_state);
6005 
6006 		old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
6007 
6008 		new_dbuf_state->active_pipes =
6009 			intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
6010 
6011 		if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes)
6012 			break;
6013 
6014 		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
6015 		if (ret)
6016 			return ret;
6017 
6018 		/*
6019 		 * skl_ddb_get_pipe_allocation_limits() currently requires
6020 		 * all active pipes to be included in the state so that
6021 		 * it can redistribute the dbuf among them.
6022 		 */
6023 		ret = intel_add_affected_pipes(state,
6024 					       new_dbuf_state->active_pipes);
6025 		if (ret)
6026 			return ret;
6027 
6028 		break;
6029 	}
6030 
6031 	return 0;
6032 }
6033 
6034 /*
6035  * To make sure the cursor watermark registers are always consistent
6036  * with our computed state the following scenario needs special
6037  * treatment:
6038  *
6039  * 1. enable cursor
6040  * 2. move cursor entirely offscreen
6041  * 3. disable cursor
6042  *
6043  * Step 2. does call .disable_plane() but does not zero the watermarks
6044  * (since we consider an offscreen cursor still active for the purposes
6045  * of watermarks). Step 3. would not normally call .disable_plane()
6046  * because the actual plane visibility isn't changing, and we don't
6047  * deallocate the cursor ddb until the pipe gets disabled. So we must
6048  * force step 3. to call .disable_plane() to update the watermark
6049  * registers properly.
6050  *
6051  * Other planes do not suffer from this issue as their watermarks are
6052  * calculated based on the actual plane visibility. The only time this
6053  * can trigger for the other planes is during the initial readout as the
6054  * default value of the watermarks registers is not zero.
6055  */
6056 static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
6057 				      struct intel_crtc *crtc)
6058 {
6059 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6060 	const struct intel_crtc_state *old_crtc_state =
6061 		intel_atomic_get_old_crtc_state(state, crtc);
6062 	struct intel_crtc_state *new_crtc_state =
6063 		intel_atomic_get_new_crtc_state(state, crtc);
6064 	struct intel_plane *plane;
6065 
6066 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
6067 		struct intel_plane_state *plane_state;
6068 		enum plane_id plane_id = plane->id;
6069 
6070 		/*
6071 		 * Force a full wm update for every plane on modeset.
6072 		 * Required because the reset value of the wm registers
6073 		 * is non-zero, whereas we want all disabled planes to
6074 		 * have zero watermarks. So if we turn off the relevant
6075 		 * power well the hardware state will go out of sync
6076 		 * with the software state.
6077 		 */
6078 		if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
6079 		    skl_plane_wm_equals(dev_priv,
6080 					&old_crtc_state->wm.skl.optimal.planes[plane_id],
6081 					&new_crtc_state->wm.skl.optimal.planes[plane_id]))
6082 			continue;
6083 
6084 		plane_state = intel_atomic_get_plane_state(state, plane);
6085 		if (IS_ERR(plane_state))
6086 			return PTR_ERR(plane_state);
6087 
6088 		new_crtc_state->update_planes |= BIT(plane_id);
6089 	}
6090 
6091 	return 0;
6092 }
6093 
6094 static int
6095 skl_compute_wm(struct intel_atomic_state *state)
6096 {
6097 	struct intel_crtc *crtc;
6098 	struct intel_crtc_state *new_crtc_state;
6099 	struct intel_crtc_state *old_crtc_state;
6100 	int ret, i;
6101 
6102 	ret = skl_ddb_add_affected_pipes(state);
6103 	if (ret)
6104 		return ret;
6105 
6106 	/*
6107 	 * Calculate WMs for all pipes that are part of this transaction.
6108 	 * Note that skl_ddb_add_affected_pipes may have added more CRTCs than
6109 	 * were otherwise being modified if pipe allocations had to change.
6110 	 */
6111 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6112 					    new_crtc_state, i) {
6113 		ret = skl_build_pipe_wm(new_crtc_state);
6114 		if (ret)
6115 			return ret;
6116 	}
6117 
6118 	ret = skl_compute_ddb(state);
6119 	if (ret)
6120 		return ret;
6121 
6122 	ret = intel_compute_sagv_mask(state);
6123 	if (ret)
6124 		return ret;
6125 
6126 	/*
6127 	 * skl_compute_ddb() will have adjusted the final watermarks
6128 	 * based on how much ddb is available. Now we can actually
6129 	 * check if the final watermarks changed.
6130 	 */
6131 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6132 					    new_crtc_state, i) {
6133 		ret = skl_wm_add_affected_planes(state, crtc);
6134 		if (ret)
6135 			return ret;
6136 	}
6137 
6138 	skl_print_wm_changes(state);
6139 
6140 	return 0;
6141 }
6142 
6143 static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
6144 				  struct intel_wm_config *config)
6145 {
6146 	struct intel_crtc *crtc;
6147 
6148 	/* Compute the currently _active_ config */
6149 	for_each_intel_crtc(&dev_priv->drm, crtc) {
6150 		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
6151 
6152 		if (!wm->pipe_enabled)
6153 			continue;
6154 
6155 		config->sprites_enabled |= wm->sprites_enabled;
6156 		config->sprites_scaled |= wm->sprites_scaled;
6157 		config->num_pipes_active++;
6158 	}
6159 }
6160 
6161 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
6162 {
6163 	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
6164 	struct ilk_wm_maximums max;
6165 	struct intel_wm_config config = {};
6166 	struct ilk_wm_values results = {};
6167 	enum intel_ddb_partitioning partitioning;
6168 
6169 	ilk_compute_wm_config(dev_priv, &config);
6170 
6171 	ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
6172 	ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
6173 
6174 	/* 5/6 split only in single pipe config on IVB+ */
6175 	if (INTEL_GEN(dev_priv) >= 7 &&
6176 	    config.num_pipes_active == 1 && config.sprites_enabled) {
6177 		ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
6178 		ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
6179 
6180 		best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
6181 	} else {
6182 		best_lp_wm = &lp_wm_1_2;
6183 	}
6184 
6185 	partitioning = (best_lp_wm == &lp_wm_1_2) ?
6186 		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
6187 
6188 	ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
6189 
6190 	ilk_write_wm_values(dev_priv, &results);
6191 }
6192 
6193 static void ilk_initial_watermarks(struct intel_atomic_state *state,
6194 				   struct intel_crtc *crtc)
6195 {
6196 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6197 	const struct intel_crtc_state *crtc_state =
6198 		intel_atomic_get_new_crtc_state(state, crtc);
6199 
6200 	mutex_lock(&dev_priv->wm.wm_mutex);
6201 	crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
6202 	ilk_program_watermarks(dev_priv);
6203 	mutex_unlock(&dev_priv->wm.wm_mutex);
6204 }
6205 
6206 static void ilk_optimize_watermarks(struct intel_atomic_state *state,
6207 				    struct intel_crtc *crtc)
6208 {
6209 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6210 	const struct intel_crtc_state *crtc_state =
6211 		intel_atomic_get_new_crtc_state(state, crtc);
6212 
6213 	if (!crtc_state->wm.need_postvbl_update)
6214 		return;
6215 
6216 	mutex_lock(&dev_priv->wm.wm_mutex);
6217 	crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
6218 	ilk_program_watermarks(dev_priv);
6219 	mutex_unlock(&dev_priv->wm.wm_mutex);
6220 }
6221 
6222 static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
6223 {
6224 	level->plane_en = val & PLANE_WM_EN;
6225 	level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
6226 	level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
6227 	level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
6228 		PLANE_WM_LINES_MASK;
6229 }
6230 
6231 void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
6232 			      struct skl_pipe_wm *out)
6233 {
6234 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6235 	enum pipe pipe = crtc->pipe;
6236 	int level, max_level;
6237 	enum plane_id plane_id;
6238 	u32 val;
6239 
6240 	max_level = ilk_wm_max_level(dev_priv);
6241 
6242 	for_each_plane_id_on_crtc(crtc, plane_id) {
6243 		struct skl_plane_wm *wm = &out->planes[plane_id];
6244 
6245 		for (level = 0; level <= max_level; level++) {
6246 			if (plane_id != PLANE_CURSOR)
6247 				val = I915_READ(PLANE_WM(pipe, plane_id, level));
6248 			else
6249 				val = I915_READ(CUR_WM(pipe, level));
6250 
6251 			skl_wm_level_from_reg_val(val, &wm->wm[level]);
6252 		}
6253 
6254 		if (INTEL_GEN(dev_priv) >= 12)
6255 			wm->sagv_wm0 = wm->wm[0];
6256 
6257 		if (plane_id != PLANE_CURSOR)
6258 			val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
6259 		else
6260 			val = I915_READ(CUR_WM_TRANS(pipe));
6261 
6262 		skl_wm_level_from_reg_val(val, &wm->trans_wm);
6263 	}
6264 
6265 	if (!crtc->active)
6266 		return;
6267 }
6268 
6269 void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
6270 {
6271 	struct intel_crtc *crtc;
6272 	struct intel_crtc_state *crtc_state;
6273 
6274 	for_each_intel_crtc(&dev_priv->drm, crtc) {
6275 		crtc_state = to_intel_crtc_state(crtc->base.state);
6276 
6277 		skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
6278 	}
6279 
6280 	if (dev_priv->active_pipes) {
6281 		/* Fully recompute DDB on first atomic commit */
6282 		dev_priv->wm.distrust_bios_wm = true;
6283 	}
6284 }
6285 
6286 static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
6287 {
6288 	struct drm_device *dev = crtc->base.dev;
6289 	struct drm_i915_private *dev_priv = to_i915(dev);
6290 	struct ilk_wm_values *hw = &dev_priv->wm.hw;
6291 	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
6292 	struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
6293 	enum pipe pipe = crtc->pipe;
6294 	static const i915_reg_t wm0_pipe_reg[] = {
6295 		[PIPE_A] = WM0_PIPEA_ILK,
6296 		[PIPE_B] = WM0_PIPEB_ILK,
6297 		[PIPE_C] = WM0_PIPEC_IVB,
6298 	};
6299 
6300 	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
6301 
6302 	memset(active, 0, sizeof(*active));
6303 
6304 	active->pipe_enabled = crtc->active;
6305 
6306 	if (active->pipe_enabled) {
6307 		u32 tmp = hw->wm_pipe[pipe];
6308 
6309 		/*
6310 		 * For active pipes LP0 watermark is marked as
6311 		 * enabled, and LP1+ watermarks as disabled since
6312 		 * we can't really reverse compute them in case
6313 		 * multiple pipes are active.
6314 		 */
6315 		active->wm[0].enable = true;
6316 		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
6317 		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
6318 		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
6319 	} else {
6320 		int level, max_level = ilk_wm_max_level(dev_priv);
6321 
6322 		/*
6323 		 * For inactive pipes, all watermark levels
6324 		 * should be marked as enabled but zeroed,
6325 		 * which is what we'd compute them to.
6326 		 */
6327 		for (level = 0; level <= max_level; level++)
6328 			active->wm[level].enable = true;
6329 	}
6330 
6331 	crtc->wm.active.ilk = *active;
6332 }
6333 
6334 #define _FW_WM(value, plane) \
6335 	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
6336 #define _FW_WM_VLV(value, plane) \
6337 	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
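
/*
 * For illustration: _FW_WM(tmp, CURSORB) expands to
 *
 *	((tmp) & DSPFW_CURSORB_MASK) >> DSPFW_CURSORB_SHIFT
 *
 * i.e. it isolates one watermark field from a packed DSPFW register
 * value. The _VLV variant differs only in the field masks it applies.
 */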
6338 
6339 static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
6340 			       struct g4x_wm_values *wm)
6341 {
6342 	u32 tmp;
6343 
6344 	tmp = I915_READ(DSPFW1);
6345 	wm->sr.plane = _FW_WM(tmp, SR);
6346 	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
6347 	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
6348 	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
6349 
6350 	tmp = I915_READ(DSPFW2);
6351 	wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
6352 	wm->sr.fbc = _FW_WM(tmp, FBC_SR);
6353 	wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
6354 	wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
6355 	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
6356 	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
6357 
6358 	tmp = I915_READ(DSPFW3);
6359 	wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
6360 	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
6361 	wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
6362 	wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
6363 }
6364 
6365 static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
6366 			       struct vlv_wm_values *wm)
6367 {
6368 	enum pipe pipe;
6369 	u32 tmp;
6370 
6371 	for_each_pipe(dev_priv, pipe) {
6372 		tmp = I915_READ(VLV_DDL(pipe));
6373 
6374 		wm->ddl[pipe].plane[PLANE_PRIMARY] =
6375 			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
6376 		wm->ddl[pipe].plane[PLANE_CURSOR] =
6377 			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
6378 		wm->ddl[pipe].plane[PLANE_SPRITE0] =
6379 			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
6380 		wm->ddl[pipe].plane[PLANE_SPRITE1] =
6381 			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
6382 	}
6383 
6384 	tmp = I915_READ(DSPFW1);
6385 	wm->sr.plane = _FW_WM(tmp, SR);
6386 	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
6387 	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
6388 	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
6389 
6390 	tmp = I915_READ(DSPFW2);
6391 	wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
6392 	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
6393 	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
6394 
6395 	tmp = I915_READ(DSPFW3);
6396 	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
6397 
6398 	if (IS_CHERRYVIEW(dev_priv)) {
6399 		tmp = I915_READ(DSPFW7_CHV);
6400 		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
6401 		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
6402 
6403 		tmp = I915_READ(DSPFW8_CHV);
6404 		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
6405 		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
6406 
6407 		tmp = I915_READ(DSPFW9_CHV);
6408 		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
6409 		wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
6410 
6411 		tmp = I915_READ(DSPHOWM);
6412 		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
6413 		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
6414 		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
6415 		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
6416 		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
6417 		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
6418 		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
6419 		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
6420 		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
6421 		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
6422 	} else {
6423 		tmp = I915_READ(DSPFW7);
6424 		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
6425 		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
6426 
6427 		tmp = I915_READ(DSPHOWM);
6428 		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
6429 		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
6430 		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
6431 		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
6432 		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
6433 		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
6434 		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
6435 	}
6436 }
6437 
6438 #undef _FW_WM
6439 #undef _FW_WM_VLV
6440 
6441 void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
6442 {
6443 	struct g4x_wm_values *wm = &dev_priv->wm.g4x;
6444 	struct intel_crtc *crtc;
6445 
6446 	g4x_read_wm_values(dev_priv, wm);
6447 
6448 	wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
6449 
6450 	for_each_intel_crtc(&dev_priv->drm, crtc) {
6451 		struct intel_crtc_state *crtc_state =
6452 			to_intel_crtc_state(crtc->base.state);
6453 		struct g4x_wm_state *active = &crtc->wm.active.g4x;
6454 		struct g4x_pipe_wm *raw;
6455 		enum pipe pipe = crtc->pipe;
6456 		enum plane_id plane_id;
6457 		int level, max_level;
6458 
6459 		active->cxsr = wm->cxsr;
6460 		active->hpll_en = wm->hpll_en;
6461 		active->fbc_en = wm->fbc_en;
6462 
6463 		active->sr = wm->sr;
6464 		active->hpll = wm->hpll;
6465 
6466 		for_each_plane_id_on_crtc(crtc, plane_id) {
6467 			active->wm.plane[plane_id] =
6468 				wm->pipe[pipe].plane[plane_id];
6469 		}
6470 
6471 		if (wm->cxsr && wm->hpll_en)
6472 			max_level = G4X_WM_LEVEL_HPLL;
6473 		else if (wm->cxsr)
6474 			max_level = G4X_WM_LEVEL_SR;
6475 		else
6476 			max_level = G4X_WM_LEVEL_NORMAL;
6477 
6478 		level = G4X_WM_LEVEL_NORMAL;
6479 		raw = &crtc_state->wm.g4x.raw[level];
6480 		for_each_plane_id_on_crtc(crtc, plane_id)
6481 			raw->plane[plane_id] = active->wm.plane[plane_id];
6482 
6483 		if (++level > max_level)
6484 			goto out;
6485 
6486 		raw = &crtc_state->wm.g4x.raw[level];
6487 		raw->plane[PLANE_PRIMARY] = active->sr.plane;
6488 		raw->plane[PLANE_CURSOR] = active->sr.cursor;
6489 		raw->plane[PLANE_SPRITE0] = 0;
6490 		raw->fbc = active->sr.fbc;
6491 
6492 		if (++level > max_level)
6493 			goto out;
6494 
6495 		raw = &crtc_state->wm.g4x.raw[level];
6496 		raw->plane[PLANE_PRIMARY] = active->hpll.plane;
6497 		raw->plane[PLANE_CURSOR] = active->hpll.cursor;
6498 		raw->plane[PLANE_SPRITE0] = 0;
6499 		raw->fbc = active->hpll.fbc;
6500 
6501 	out:
6502 		for_each_plane_id_on_crtc(crtc, plane_id)
6503 			g4x_raw_plane_wm_set(crtc_state, level,
6504 					     plane_id, USHRT_MAX);
6505 		g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
6506 
6507 		crtc_state->wm.g4x.optimal = *active;
6508 		crtc_state->wm.g4x.intermediate = *active;
6509 
6510 		drm_dbg_kms(&dev_priv->drm,
6511 			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
6512 			    pipe_name(pipe),
6513 			    wm->pipe[pipe].plane[PLANE_PRIMARY],
6514 			    wm->pipe[pipe].plane[PLANE_CURSOR],
6515 			    wm->pipe[pipe].plane[PLANE_SPRITE0]);
6516 	}
6517 
6518 	drm_dbg_kms(&dev_priv->drm,
6519 		    "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
6520 		    wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
6521 	drm_dbg_kms(&dev_priv->drm,
6522 		    "Initial HPLL watermarks: plane=%d, cursor=%d fbc=%d\n",
6523 		    wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
6524 	drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
6525 		    yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
6526 }
6527 
6528 void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
6529 {
6530 	struct intel_plane *plane;
6531 	struct intel_crtc *crtc;
6532 
6533 	mutex_lock(&dev_priv->wm.wm_mutex);
6534 
6535 	for_each_intel_plane(&dev_priv->drm, plane) {
6536 		struct intel_crtc *crtc =
6537 			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
6538 		struct intel_crtc_state *crtc_state =
6539 			to_intel_crtc_state(crtc->base.state);
6540 		struct intel_plane_state *plane_state =
6541 			to_intel_plane_state(plane->base.state);
6542 		struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
6543 		enum plane_id plane_id = plane->id;
6544 		int level;
6545 
6546 		if (plane_state->uapi.visible)
6547 			continue;
6548 
6549 		for (level = 0; level < 3; level++) {
6550 			struct g4x_pipe_wm *raw =
6551 				&crtc_state->wm.g4x.raw[level];
6552 
6553 			raw->plane[plane_id] = 0;
6554 			wm_state->wm.plane[plane_id] = 0;
6555 		}
6556 
6557 		if (plane_id == PLANE_PRIMARY) {
6558 			for (level = 0; level < 3; level++) {
6559 				struct g4x_pipe_wm *raw =
6560 					&crtc_state->wm.g4x.raw[level];
6561 				raw->fbc = 0;
6562 			}
6563 
6564 			wm_state->sr.fbc = 0;
6565 			wm_state->hpll.fbc = 0;
6566 			wm_state->fbc_en = false;
6567 		}
6568 	}
6569 
6570 	for_each_intel_crtc(&dev_priv->drm, crtc) {
6571 		struct intel_crtc_state *crtc_state =
6572 			to_intel_crtc_state(crtc->base.state);
6573 
6574 		crtc_state->wm.g4x.intermediate =
6575 			crtc_state->wm.g4x.optimal;
6576 		crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
6577 	}
6578 
6579 	g4x_program_watermarks(dev_priv);
6580 
6581 	mutex_unlock(&dev_priv->wm.wm_mutex);
6582 }
6583 
6584 void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
6585 {
6586 	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
6587 	struct intel_crtc *crtc;
6588 	u32 val;
6589 
6590 	vlv_read_wm_values(dev_priv, wm);
6591 
6592 	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
6593 	wm->level = VLV_WM_LEVEL_PM2;
6594 
6595 	if (IS_CHERRYVIEW(dev_priv)) {
6596 		vlv_punit_get(dev_priv);
6597 
6598 		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
6599 		if (val & DSP_MAXFIFO_PM5_ENABLE)
6600 			wm->level = VLV_WM_LEVEL_PM5;
6601 
6602 		/*
6603 		 * If DDR DVFS is disabled in the BIOS, Punit
6604 		 * will never ack the request. So if that happens
6605 		 * assume we don't have to enable/disable DDR DVFS
6606 		 * dynamically. To test that, just set the REQ_ACK
6607 		 * bit to poke the Punit, but don't change the
6608 		 * HIGH/LOW bits so that we don't actually change
6609 		 * the current state.
6610 		 */
6611 		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
6612 		val |= FORCE_DDR_FREQ_REQ_ACK;
6613 		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
6614 
6615 		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
6616 			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
6617 			drm_dbg_kms(&dev_priv->drm,
6618 				    "Punit not acking DDR DVFS request, "
6619 				    "assuming DDR DVFS is disabled\n");
6620 			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
6621 		} else {
6622 			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
6623 			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
6624 				wm->level = VLV_WM_LEVEL_DDR_DVFS;
6625 		}
6626 
6627 		vlv_punit_put(dev_priv);
6628 	}
6629 
6630 	for_each_intel_crtc(&dev_priv->drm, crtc) {
6631 		struct intel_crtc_state *crtc_state =
6632 			to_intel_crtc_state(crtc->base.state);
6633 		struct vlv_wm_state *active = &crtc->wm.active.vlv;
6634 		const struct vlv_fifo_state *fifo_state =
6635 			&crtc_state->wm.vlv.fifo_state;
6636 		enum pipe pipe = crtc->pipe;
6637 		enum plane_id plane_id;
6638 		int level;
6639 
6640 		vlv_get_fifo_size(crtc_state);
6641 
6642 		active->num_levels = wm->level + 1;
6643 		active->cxsr = wm->cxsr;
6644 
6645 		for (level = 0; level < active->num_levels; level++) {
6646 			struct g4x_pipe_wm *raw =
6647 				&crtc_state->wm.vlv.raw[level];
6648 
6649 			active->sr[level].plane = wm->sr.plane;
6650 			active->sr[level].cursor = wm->sr.cursor;
6651 
6652 			for_each_plane_id_on_crtc(crtc, plane_id) {
6653 				active->wm[level].plane[plane_id] =
6654 					wm->pipe[pipe].plane[plane_id];
6655 
6656 				raw->plane[plane_id] =
6657 					vlv_invert_wm_value(active->wm[level].plane[plane_id],
6658 							    fifo_state->plane[plane_id]);
6659 			}
6660 		}
6661 
6662 		for_each_plane_id_on_crtc(crtc, plane_id)
6663 			vlv_raw_plane_wm_set(crtc_state, level,
6664 					     plane_id, USHRT_MAX);
6665 		vlv_invalidate_wms(crtc, active, level);
6666 
6667 		crtc_state->wm.vlv.optimal = *active;
6668 		crtc_state->wm.vlv.intermediate = *active;
6669 
6670 		drm_dbg_kms(&dev_priv->drm,
6671 			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
6672 			    pipe_name(pipe),
6673 			    wm->pipe[pipe].plane[PLANE_PRIMARY],
6674 			    wm->pipe[pipe].plane[PLANE_CURSOR],
6675 			    wm->pipe[pipe].plane[PLANE_SPRITE0],
6676 			    wm->pipe[pipe].plane[PLANE_SPRITE1]);
6677 	}
6678 
6679 	drm_dbg_kms(&dev_priv->drm,
6680 		    "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
6681 		    wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
6682 }
6683 
6684 void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
6685 {
6686 	struct intel_plane *plane;
6687 	struct intel_crtc *crtc;
6688 
6689 	mutex_lock(&dev_priv->wm.wm_mutex);
6690 
6691 	for_each_intel_plane(&dev_priv->drm, plane) {
6692 		struct intel_crtc *crtc =
6693 			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
6694 		struct intel_crtc_state *crtc_state =
6695 			to_intel_crtc_state(crtc->base.state);
6696 		struct intel_plane_state *plane_state =
6697 			to_intel_plane_state(plane->base.state);
6698 		struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
6699 		const struct vlv_fifo_state *fifo_state =
6700 			&crtc_state->wm.vlv.fifo_state;
6701 		enum plane_id plane_id = plane->id;
6702 		int level;
6703 
6704 		if (plane_state->uapi.visible)
6705 			continue;
6706 
6707 		for (level = 0; level < wm_state->num_levels; level++) {
6708 			struct g4x_pipe_wm *raw =
6709 				&crtc_state->wm.vlv.raw[level];
6710 
6711 			raw->plane[plane_id] = 0;
6712 
6713 			wm_state->wm[level].plane[plane_id] =
6714 				vlv_invert_wm_value(raw->plane[plane_id],
6715 						    fifo_state->plane[plane_id]);
6716 		}
6717 	}
6718 
6719 	for_each_intel_crtc(&dev_priv->drm, crtc) {
6720 		struct intel_crtc_state *crtc_state =
6721 			to_intel_crtc_state(crtc->base.state);
6722 
6723 		crtc_state->wm.vlv.intermediate =
6724 			crtc_state->wm.vlv.optimal;
6725 		crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
6726 	}
6727 
6728 	vlv_program_watermarks(dev_priv);
6729 
6730 	mutex_unlock(&dev_priv->wm.wm_mutex);
6731 }
6732 
6733 /*
6734  * FIXME should probably kill this and improve
6735  * the real watermark readout/sanitization instead
6736  */
6737 static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
6738 {
6739 	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
6740 	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
6741 	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
6742 
6743 	/*
6744 	 * Don't touch WM1S_LP_EN here.
6745 	 * Doing so could cause underruns.
6746 	 */
6747 }
6748 
6749 void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
6750 {
6751 	struct ilk_wm_values *hw = &dev_priv->wm.hw;
6752 	struct intel_crtc *crtc;
6753 
6754 	ilk_init_lp_watermarks(dev_priv);
6755 
6756 	for_each_intel_crtc(&dev_priv->drm, crtc)
6757 		ilk_pipe_wm_get_hw_state(crtc);
6758 
6759 	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
6760 	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
6761 	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
6762 
6763 	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
6764 	if (INTEL_GEN(dev_priv) >= 7) {
6765 		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
6766 		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
6767 	}
6768 
6769 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
6770 		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
6771 			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
6772 	else if (IS_IVYBRIDGE(dev_priv))
6773 		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
6774 			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
6775 
6776 	hw->enable_fbc_wm =
6777 		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
6778 }
6779 
6780 /**
6781  * intel_update_watermarks - update FIFO watermark values based on current modes
6782  * @crtc: the #intel_crtc on which to compute the WM
6783  *
6784  * Calculate watermark values for the various WM regs based on current mode
6785  * and plane configuration.
6786  *
6787  * There are several cases to deal with here:
6788  *   - normal (i.e. non-self-refresh)
6789  *   - self-refresh (SR) mode
6790  *   - lines are large relative to FIFO size (buffer can hold up to 2)
6791  *   - lines are small relative to FIFO size (buffer can hold more than 2
6792  *     lines), so need to account for TLB latency
6793  *
6794  *   The normal calculation is:
6795  *     watermark = dotclock * bytes per pixel * latency
6796  *   where latency is platform & configuration dependent (we assume pessimal
6797  *   values here).
6798  *
6799  *   The SR calculation is:
6800  *     watermark = (trunc(latency/line time)+1) * surface width *
6801  *       bytes per pixel
6802  *   where
6803  *     line time = htotal / dotclock
6804  *     surface width = hdisplay for normal plane and 64 for cursor
6805  *   and latency is assumed to be high, as above.
6806  *
6807  * The final value programmed to the register should always be rounded up,
6808  * and include an extra 2 entries to account for clock crossings.
6809  *
6810  * We don't use the sprite, so we can ignore that.  And on Crestline we have
6811  * to set the non-SR watermarks to 8.
6812  */
6813 void intel_update_watermarks(struct intel_crtc *crtc)
6814 {
6815 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6816 
6817 	if (dev_priv->display.update_wm)
6818 		dev_priv->display.update_wm(crtc);
6819 }
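
/*
 * To make the formulas above concrete, here is a minimal
 * self-contained sketch of both calculations. These helpers are
 * illustrative only and are not used by the driver; the names and
 * the sample numbers below are hypothetical.
 */
static inline unsigned int example_normal_wm_bytes(unsigned int dotclock_khz,
						   unsigned int cpp,
						   unsigned int latency_us)
{
	/* watermark = dotclock * bytes per pixel * latency */
	return DIV_ROUND_UP(dotclock_khz * cpp * latency_us, 1000);
}

static inline unsigned int example_sr_wm_bytes(unsigned int dotclock_khz,
					       unsigned int htotal,
					       unsigned int hdisplay,
					       unsigned int cpp,
					       unsigned int latency_us)
{
	/* line time = htotal / dotclock, expressed here in usec */
	unsigned int line_time_us = htotal * 1000 / dotclock_khz;

	/* watermark = (trunc(latency / line time) + 1) * width * cpp */
	return (latency_us / line_time_us + 1) * hdisplay * cpp;
}

/*
 * E.g. a 100 MHz dotclock, 4 bytes per pixel and 10 usec of latency
 * give example_normal_wm_bytes(100000, 4, 10) == 4000 bytes; the value
 * actually programmed would additionally be rounded up to the FIFO
 * entry size and padded with the two clock-crossing entries noted
 * above.
 */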
6820 
6821 void intel_enable_ipc(struct drm_i915_private *dev_priv)
6822 {
6823 	u32 val;
6824 
6825 	if (!HAS_IPC(dev_priv))
6826 		return;
6827 
6828 	val = I915_READ(DISP_ARB_CTL2);
6829 
6830 	if (dev_priv->ipc_enabled)
6831 		val |= DISP_IPC_ENABLE;
6832 	else
6833 		val &= ~DISP_IPC_ENABLE;
6834 
6835 	I915_WRITE(DISP_ARB_CTL2, val);
6836 }
6837 
6838 static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
6839 {
6840 	/* Display WA #0477 WaDisableIPC: skl */
6841 	if (IS_SKYLAKE(dev_priv))
6842 		return false;
6843 
6844 	/* Display WA #1141: SKL:all KBL:all CFL */
6845 	if (IS_KABYLAKE(dev_priv) ||
6846 	    IS_COFFEELAKE(dev_priv) ||
6847 	    IS_COMETLAKE(dev_priv))
6848 		return dev_priv->dram_info.symmetric_memory;
6849 
6850 	return true;
6851 }
6852 
6853 void intel_init_ipc(struct drm_i915_private *dev_priv)
6854 {
6855 	if (!HAS_IPC(dev_priv))
6856 		return;
6857 
6858 	dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);
6859 
6860 	intel_enable_ipc(dev_priv);
6861 }
6862 
6863 static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
6864 {
6865 	/*
6866 	 * On Ibex Peak and Cougar Point, we need to disable clock
6867 	 * gating for the panel power sequencer or it will fail to
6868 	 * start up when no ports are active.
6869 	 */
6870 	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
6871 }
6872 
6873 static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
6874 {
6875 	enum pipe pipe;
6876 
6877 	for_each_pipe(dev_priv, pipe) {
6878 		I915_WRITE(DSPCNTR(pipe),
6879 			   I915_READ(DSPCNTR(pipe)) |
6880 			   DISPPLANE_TRICKLE_FEED_DISABLE);
6881 
6882 		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
6883 		POSTING_READ(DSPSURF(pipe));
6884 	}
6885 }
6886 
6887 static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
6888 {
6889 	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6890 
6891 	/*
6892 	 * Required for FBC
6893 	 * WaFbcDisableDpfcClockGating:ilk
6894 	 */
6895 	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
6896 		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
6897 		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
6898 
6899 	I915_WRITE(PCH_3DCGDIS0,
6900 		   MARIUNIT_CLOCK_GATE_DISABLE |
6901 		   SVSMUNIT_CLOCK_GATE_DISABLE);
6902 	I915_WRITE(PCH_3DCGDIS1,
6903 		   VFMUNIT_CLOCK_GATE_DISABLE);
6904 
6905 	/*
6906 	 * According to the spec the following bits should be set in
6907 	 * order to enable memory self-refresh:
6908 	 * The bit 22/21 of 0x42004
6909 	 * The bit 5 of 0x42020
6910 	 * The bit 15 of 0x45000
6911 	 */
6912 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
6913 		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
6914 		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
6915 	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
6916 	I915_WRITE(DISP_ARB_CTL,
6917 		   (I915_READ(DISP_ARB_CTL) |
6918 		    DISP_FBC_WM_DIS));
6919 
6920 	/*
6921 	 * Based on the document from hardware guys the following bits
6922 	 * should be set unconditionally in order to enable FBC.
6923 	 * The bit 22 of 0x42000
6924 	 * The bit 22 of 0x42004
6925 	 * The bit 7,8,9 of 0x42020.
6926 	 */
6927 	if (IS_IRONLAKE_M(dev_priv)) {
6928 		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
6929 		I915_WRITE(ILK_DISPLAY_CHICKEN1,
6930 			   I915_READ(ILK_DISPLAY_CHICKEN1) |
6931 			   ILK_FBCQ_DIS);
6932 		I915_WRITE(ILK_DISPLAY_CHICKEN2,
6933 			   I915_READ(ILK_DISPLAY_CHICKEN2) |
6934 			   ILK_DPARB_GATE);
6935 	}
6936 
6937 	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6938 
6939 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
6940 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
6941 		   ILK_ELPIN_409_SELECT);
6942 
6943 	g4x_disable_trickle_feed(dev_priv);
6944 
6945 	ibx_init_clock_gating(dev_priv);
6946 }
6947 
6948 static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
6949 {
6950 	enum pipe pipe;
6951 	u32 val;
6952 
6953 	/*
6954 	 * On Ibex Peak and Cougar Point, we need to disable clock
6955 	 * gating for the panel power sequencer or it will fail to
6956 	 * start up when no ports are active.
6957 	 */
6958 	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
6959 		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
6960 		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
6961 	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
6962 		   DPLS_EDP_PPS_FIX_DIS);
6963 	/* The following fixes weird display corruption (a few pixels shifted
6964 	 * downward) seen only on the LVDS of some HP laptops with IVY.
6965 	 */
6966 	for_each_pipe(dev_priv, pipe) {
6967 		val = I915_READ(TRANS_CHICKEN2(pipe));
6968 		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
6969 		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
6970 		if (dev_priv->vbt.fdi_rx_polarity_inverted)
6971 			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
6972 		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
6973 		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
6974 		I915_WRITE(TRANS_CHICKEN2(pipe), val);
6975 	}
6976 	/* WADP0ClockGatingDisable */
6977 	for_each_pipe(dev_priv, pipe) {
6978 		I915_WRITE(TRANS_CHICKEN1(pipe),
6979 			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
6980 	}
6981 }
6982 
6983 static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
6984 {
6985 	u32 tmp;
6986 
6987 	tmp = I915_READ(MCH_SSKPD);
6988 	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
6989 		drm_dbg_kms(&dev_priv->drm,
6990 			    "Wrong MCH_SSKPD value: 0x%08x. This can cause underruns.\n",
6991 			    tmp);
6992 }
6993 
6994 static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
6995 {
6996 	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6997 
6998 	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6999 
7000 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
7001 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
7002 		   ILK_ELPIN_409_SELECT);
7003 
7004 	I915_WRITE(GEN6_UCGCTL1,
7005 		   I915_READ(GEN6_UCGCTL1) |
7006 		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
7007 		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7008 
7009 	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
7010 	 * gating disable must be set.  Failure to set it results in
7011 	 * flickering pixels due to Z write ordering failures after
7012 	 * some amount of runtime in the Mesa "fire" demo, and Unigine
7013 	 * Sanctuary and Tropics, and apparently anything else with
7014 	 * alpha test or pixel discard.
7015 	 *
7016 	 * According to the spec, bit 11 (RCCUNIT) must also be set,
7017 	 * but we didn't debug actual testcases to find it out.
7018 	 *
7019 	 * WaDisableRCCUnitClockGating:snb
7020 	 * WaDisableRCPBUnitClockGating:snb
7021 	 */
7022 	I915_WRITE(GEN6_UCGCTL2,
7023 		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
7024 		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
7025 
7026 	/*
7027 	 * According to the spec the following bits should be
7028 	 * set in order to enable memory self-refresh and fbc:
7029 	 * The bit21 and bit22 of 0x42000
7030 	 * The bit21 and bit22 of 0x42004
7031 	 * The bit5 and bit7 of 0x42020
7032 	 * The bit14 of 0x70180
7033 	 * The bit14 of 0x71180
7034 	 *
7035 	 * WaFbcAsynchFlipDisableFbcQueue:snb
7036 	 */
7037 	I915_WRITE(ILK_DISPLAY_CHICKEN1,
7038 		   I915_READ(ILK_DISPLAY_CHICKEN1) |
7039 		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
7040 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
7041 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
7042 		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
7043 	I915_WRITE(ILK_DSPCLK_GATE_D,
7044 		   I915_READ(ILK_DSPCLK_GATE_D) |
7045 		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
7046 		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
7047 
7048 	g4x_disable_trickle_feed(dev_priv);
7049 
7050 	cpt_init_clock_gating(dev_priv);
7051 
7052 	gen6_check_mch_setup(dev_priv);
7053 }
7054 
7055 static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
7056 {
7057 	/*
7058 	 * TODO: this bit should only be enabled when really needed, then
7059 	 * disabled when not needed anymore in order to save power.
7060 	 */
7061 	if (HAS_PCH_LPT_LP(dev_priv))
7062 		I915_WRITE(SOUTH_DSPCLK_GATE_D,
7063 			   I915_READ(SOUTH_DSPCLK_GATE_D) |
7064 			   PCH_LP_PARTITION_LEVEL_DISABLE);
7065 
7066 	/* WADPOClockGatingDisable:hsw */
7067 	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
7068 		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
7069 		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
7070 }
7071 
7072 static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
7073 {
7074 	if (HAS_PCH_LPT_LP(dev_priv)) {
7075 		u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);
7076 
7077 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
7078 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
7079 	}
7080 }
7081 
7082 static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
7083 				   int general_prio_credits,
7084 				   int high_prio_credits)
7085 {
7086 	u32 misccpctl;
7087 	u32 val;
7088 
7089 	/* WaTempDisableDOPClkGating:bdw */
7090 	misccpctl = I915_READ(GEN7_MISCCPCTL);
7091 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
7092 
7093 	val = I915_READ(GEN8_L3SQCREG1);
7094 	val &= ~L3_PRIO_CREDITS_MASK;
7095 	val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
7096 	val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
7097 	I915_WRITE(GEN8_L3SQCREG1, val);
7098 
7099 	/*
7100 	 * Wait at least 100 clocks before re-enabling clock gating.
7101 	 * See the definition of L3SQCREG1 in BSpec.
7102 	 */
7103 	POSTING_READ(GEN8_L3SQCREG1);
7104 	udelay(1);
7105 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
7106 }
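
/*
 * The callers below pick platform-tuned defaults: bdw programs 30
 * general / 2 high priority credits (WaProgramL3SqcReg1Default:bdw),
 * while chv programs 38 / 2 per the LSQC setting recommendations.
 */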
7107 
7108 static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
7109 {
7110 	/* Wa_1409120013:icl,ehl */
7111 	I915_WRITE(ILK_DPFC_CHICKEN,
7112 		   ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
7113 
7114 	/* This is not a Wa. Enable to reduce Sampler power */
7115 	I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
7116 		   I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
7117 
7118 	/* Wa_14010594013:icl,ehl */
7119 	intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
7120 			 0, CNL_DELAY_PMRSP);
7121 }
7122 
7123 static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
7124 {
7125 	/* Wa_1409120013:tgl */
7126 	I915_WRITE(ILK_DPFC_CHICKEN,
7127 		   ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
7128 
7129 	/* Wa_1409825376:tgl (pre-prod) */
7130 	if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1))
7131 		I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
7132 			   TGL_VRH_GATING_DIS);
7133 
7134 	/* Wa_14011059788:tgl */
7135 	intel_uncore_rmw(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
7136 			 0, DFR_DISABLE);
7137 }
7138 
7139 static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
7140 {
7141 	if (!HAS_PCH_CNP(dev_priv))
7142 		return;
7143 
7144 	/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
7145 	I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
7146 		   CNP_PWM_CGE_GATING_DISABLE);
7147 }
7148 
7149 static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
7150 {
7151 	u32 val;
7152 	cnp_init_clock_gating(dev_priv);
7153 
7154 	/* This is not a Wa. Enable for better image quality */
7155 	I915_WRITE(_3D_CHICKEN3,
7156 		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
7157 
7158 	/* WaEnableChickenDCPR:cnl */
7159 	I915_WRITE(GEN8_CHICKEN_DCPR_1,
7160 		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
7161 
7162 	/*
7163 	 * WaFbcWakeMemOn:cnl
7164 	 * Display WA #0859: cnl
7165 	 */
7166 	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
7167 		   DISP_FBC_MEMORY_WAKE);
7168 
7169 	val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
7170 	/* ReadHitWriteOnlyDisable:cnl */
7171 	val |= RCCUNIT_CLKGATE_DIS;
7172 	I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
7173 
7174 	/* Wa_2201832410:cnl */
7175 	val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE);
7176 	val |= GWUNIT_CLKGATE_DIS;
7177 	I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val);
7178 
7179 	/* WaDisableVFclkgate:cnl */
7180 	/* WaVFUnitClockGatingDisable:cnl */
7181 	val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
7182 	val |= VFUNIT_CLKGATE_DIS;
7183 	I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
7184 }
7185 
7186 static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
7187 {
7188 	cnp_init_clock_gating(dev_priv);
7189 	gen9_init_clock_gating(dev_priv);
7190 
7191 	/*
7192 	 * WaFbcTurnOffFbcWatermark:cfl
7193 	 * Display WA #0562: cfl
7194 	 */
7195 	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
7196 		   DISP_FBC_WM_DIS);
7197 
7198 	/*
7199 	 * WaFbcNukeOnHostModify:cfl
7200 	 * Display WA #0873: cfl
7201 	 */
7202 	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
7203 		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
7204 }
7205 
7206 static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
7207 {
7208 	gen9_init_clock_gating(dev_priv);
7209 
7210 	/* WaDisableSDEUnitClockGating:kbl */
7211 	if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
7212 		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7213 			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7214 
7215 	/* WaDisableGamClockGating:kbl */
7216 	if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
7217 		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
7218 			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
7219 
7220 	/*
7221 	 * WaFbcTurnOffFbcWatermark:kbl
7222 	 * Display WA #0562: kbl
7223 	 */
7224 	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
7225 		   DISP_FBC_WM_DIS);
7226 
7227 	/*
7228 	 * WaFbcNukeOnHostModify:kbl
7229 	 * Display WA #0873: kbl
7230 	 */
7231 	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
7232 		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
7233 }
7234 
7235 static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
7236 {
7237 	gen9_init_clock_gating(dev_priv);
7238 
7239 	/* WaDisableDopClockGating:skl */
7240 	I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL) &
7241 		   ~GEN7_DOP_CLOCK_GATE_ENABLE);
7242 
7243 	/* WAC6entrylatency:skl */
7244 	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
7245 		   FBC_LLC_FULLY_OPEN);
7246 
7247 	/*
7248 	 * WaFbcTurnOffFbcWatermark:skl
7249 	 * Display WA #0562: skl
7250 	 */
7251 	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
7252 		   DISP_FBC_WM_DIS);
7253 
7254 	/*
7255 	 * WaFbcNukeOnHostModify:skl
7256 	 * Display WA #0873: skl
7257 	 */
7258 	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
7259 		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
7260 
7261 	/*
7262 	 * WaFbcHighMemBwCorruptionAvoidance:skl
7263 	 * Display WA #0883: skl
7264 	 */
7265 	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
7266 		   ILK_DPFC_DISABLE_DUMMY0);
7267 }
7268 
7269 static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
7270 {
7271 	enum pipe pipe;
7272 
7273 	/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
7274 	I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
7275 		   I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
7276 		   HSW_FBCQ_DIS);
7277 
7278 	/* WaSwitchSolVfFArbitrationPriority:bdw */
7279 	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
7280 
7281 	/* WaPsrDPAMaskVBlankInSRD:bdw */
7282 	I915_WRITE(CHICKEN_PAR1_1,
7283 		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
7284 
7285 	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
7286 	for_each_pipe(dev_priv, pipe) {
7287 		I915_WRITE(CHICKEN_PIPESL_1(pipe),
7288 			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
7289 			   BDW_DPRS_MASK_VBLANK_SRD);
7290 	}
7291 
7292 	/* WaVSRefCountFullforceMissDisable:bdw */
7293 	/* WaDSRefCountFullforceMissDisable:bdw */
7294 	I915_WRITE(GEN7_FF_THREAD_MODE,
7295 		   I915_READ(GEN7_FF_THREAD_MODE) &
7296 		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
7297 
7298 	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
7299 		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
7300 
7301 	/* WaDisableSDEUnitClockGating:bdw */
7302 	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7303 		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7304 
7305 	/* WaProgramL3SqcReg1Default:bdw */
7306 	gen8_set_l3sqc_credits(dev_priv, 30, 2);
7307 
7308 	/* WaKVMNotificationOnConfigChange:bdw */
7309 	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
7310 		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
7311 
7312 	lpt_init_clock_gating(dev_priv);
7313 
7314 	/* WaDisableDopClockGating:bdw
7315 	 *
7316 	 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
7317 	 * clock gating.
7318 	 */
7319 	I915_WRITE(GEN6_UCGCTL1,
7320 		   I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
7321 }
7322 
7323 static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
7324 {
7325 	/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
7326 	I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
7327 		   I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
7328 		   HSW_FBCQ_DIS);
7329 
7330 	/* This is required by WaCatErrorRejectionIssue:hsw */
7331 	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7332 		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7333 		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7334 
7335 	/* WaSwitchSolVfFArbitrationPriority:hsw */
7336 	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
7337 
7338 	lpt_init_clock_gating(dev_priv);
7339 }
7340 
7341 static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
7342 {
7343 	u32 snpcr;
7344 
7345 	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
7346 
7347 	/* WaFbcAsynchFlipDisableFbcQueue:ivb */
7348 	I915_WRITE(ILK_DISPLAY_CHICKEN1,
7349 		   I915_READ(ILK_DISPLAY_CHICKEN1) |
7350 		   ILK_FBCQ_DIS);
7351 
7352 	/* WaDisableBackToBackFlipFix:ivb */
7353 	I915_WRITE(IVB_CHICKEN3,
7354 		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7355 		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
7356 
7357 	if (IS_IVB_GT1(dev_priv))
7358 		I915_WRITE(GEN7_ROW_CHICKEN2,
7359 			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7360 	else {
7361 		/* must write both registers */
7362 		I915_WRITE(GEN7_ROW_CHICKEN2,
7363 			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7364 		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
7365 			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7366 	}
7367 
7368 	/*
7369 	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
7370 	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
7371 	 */
7372 	I915_WRITE(GEN6_UCGCTL2,
7373 		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7374 
7375 	/* This is required by WaCatErrorRejectionIssue:ivb */
7376 	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7377 			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7378 			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7379 
7380 	g4x_disable_trickle_feed(dev_priv);
7381 
7382 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
7383 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
7384 	snpcr |= GEN6_MBC_SNPCR_MED;
7385 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
7386 
7387 	if (!HAS_PCH_NOP(dev_priv))
7388 		cpt_init_clock_gating(dev_priv);
7389 
7390 	gen6_check_mch_setup(dev_priv);
7391 }
7392 
7393 static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
7394 {
7395 	/* WaDisableBackToBackFlipFix:vlv */
7396 	I915_WRITE(IVB_CHICKEN3,
7397 		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7398 		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
7399 
7400 	/* WaDisableDopClockGating:vlv */
7401 	I915_WRITE(GEN7_ROW_CHICKEN2,
7402 		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7403 
7404 	/* This is required by WaCatErrorRejectionIssue:vlv */
7405 	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7406 		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7407 		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7408 
7409 	/*
7410 	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
7411 	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
7412 	 */
7413 	I915_WRITE(GEN6_UCGCTL2,
7414 		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7415 
7416 	/* WaDisableL3Bank2xClockGate:vlv
7417 	 * Disabling L3 clock gating - MMIO 940c[25] = 1
7418 	 * Set bit 25 to disable L3_BANK_2x_CLK_GATING */
7419 	I915_WRITE(GEN7_UCGCTL4,
7420 		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
7421 
7422 	/*
7423 	 * WaDisableVLVClockGating_VBIIssue:vlv
7424 	 * Disable clock gating on the GCFG unit to prevent a delay
7425 	 * in the reporting of vblank events.
7426 	 */
7427 	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
7428 }
7429 
7430 static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
7431 {
7432 	/* WaVSRefCountFullforceMissDisable:chv */
7433 	/* WaDSRefCountFullforceMissDisable:chv */
7434 	I915_WRITE(GEN7_FF_THREAD_MODE,
7435 		   I915_READ(GEN7_FF_THREAD_MODE) &
7436 		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
7437 
7438 	/* WaDisableSemaphoreAndSyncFlipWait:chv */
7439 	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
7440 		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
7441 
7442 	/* WaDisableCSUnitClockGating:chv */
7443 	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
7444 		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7445 
7446 	/* WaDisableSDEUnitClockGating:chv */
7447 	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7448 		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7449 
7450 	/*
7451 	 * WaProgramL3SqcReg1Default:chv
7452 	 * See gfxspecs/Related Documents/Performance Guide/
7453 	 * LSQC Setting Recommendations.
7454 	 */
7455 	gen8_set_l3sqc_credits(dev_priv, 38, 2);
7456 }
7457 
7458 static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
7459 {
7460 	u32 dspclk_gate;
7461 
7462 	I915_WRITE(RENCLK_GATE_D1, 0);
7463 	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7464 		   GS_UNIT_CLOCK_GATE_DISABLE |
7465 		   CL_UNIT_CLOCK_GATE_DISABLE);
7466 	I915_WRITE(RAMCLK_GATE_D, 0);
7467 	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7468 		OVRUNIT_CLOCK_GATE_DISABLE |
7469 		OVCUNIT_CLOCK_GATE_DISABLE;
7470 	if (IS_GM45(dev_priv))
7471 		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7472 	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
7473 
7474 	g4x_disable_trickle_feed(dev_priv);
7475 }
7476 
7477 static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
7478 {
7479 	struct intel_uncore *uncore = &dev_priv->uncore;
7480 
7481 	intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7482 	intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
7483 	intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
7484 	intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
7485 	intel_uncore_write16(uncore, DEUC, 0);
7486 	intel_uncore_write(uncore,
7487 			   MI_ARB_STATE,
7488 			   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7489 }
7490 
7491 static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
7492 {
7493 	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7494 		   I965_RCC_CLOCK_GATE_DISABLE |
7495 		   I965_RCPB_CLOCK_GATE_DISABLE |
7496 		   I965_ISC_CLOCK_GATE_DISABLE |
7497 		   I965_FBC_CLOCK_GATE_DISABLE);
7498 	I915_WRITE(RENCLK_GATE_D2, 0);
7499 	I915_WRITE(MI_ARB_STATE,
7500 		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7501 }
7502 
7503 static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
7504 {
7505 	u32 dstate = I915_READ(D_STATE);
7506 
7507 	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7508 		DSTATE_DOT_CLOCK_GATING;
7509 	I915_WRITE(D_STATE, dstate);
7510 
7511 	if (IS_PINEVIEW(dev_priv))
7512 		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
7513 
7514 	/* IIR "flip pending" means done if this bit is set */
7515 	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
7516 
7517 	/* interrupts should cause a wake up from C3 */
7518 	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
7519 
7520 	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
7521 	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
7522 
7523 	I915_WRITE(MI_ARB_STATE,
7524 		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7525 }
7526 
7527 static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
7528 {
7529 	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
7530 
7531 	/* interrupts should cause a wake up from C3 */
7532 	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
7533 		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
7534 
7535 	I915_WRITE(MEM_MODE,
7536 		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
7537 
7538 	/*
7539 	 * Have FBC ignore 3D activity since we use software
7540 	 * render tracking, and otherwise a pure 3D workload
7541 	 * (even if it just renders a single frame and then does
7542 	 * absolutely nothing) would not allow FBC to recompress
7543 	 * until a 2D blit occurs.
7544 	 */
7545 	I915_WRITE(SCPD0,
7546 		   _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
7547 }
7548 
7549 static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
7550 {
7551 	I915_WRITE(MEM_MODE,
7552 		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
7553 		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
7554 }
7555 
7556 void intel_init_clock_gating(struct drm_i915_private *dev_priv)
7557 {
7558 	dev_priv->display.init_clock_gating(dev_priv);
7559 }
7560 
7561 void intel_suspend_hw(struct drm_i915_private *dev_priv)
7562 {
7563 	if (HAS_PCH_LPT(dev_priv))
7564 		lpt_suspend_hw(dev_priv);
7565 }
7566 
7567 static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
7568 {
7569 	drm_dbg_kms(&dev_priv->drm,
7570 		    "No clock gating settings or workarounds applied.\n");
7571 }
7572 
7573 /**
7574  * intel_init_clock_gating_hooks - setup the clock gating hooks
7575  * @dev_priv: device private
7576  *
7577  * Setup the hooks that configure which clocks of a given platform can be
7578  * gated and also apply various GT and display specific workarounds for these
7579  * platforms. Note that some GT specific workarounds are applied separately
7580  * when GPU contexts or batchbuffers start their execution.
7581  */
7582 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
7583 {
7584 	if (IS_GEN(dev_priv, 12))
7585 		dev_priv->display.init_clock_gating = tgl_init_clock_gating;
7586 	else if (IS_GEN(dev_priv, 11))
7587 		dev_priv->display.init_clock_gating = icl_init_clock_gating;
7588 	else if (IS_CANNONLAKE(dev_priv))
7589 		dev_priv->display.init_clock_gating = cnl_init_clock_gating;
7590 	else if (IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
7591 		dev_priv->display.init_clock_gating = cfl_init_clock_gating;
7592 	else if (IS_SKYLAKE(dev_priv))
7593 		dev_priv->display.init_clock_gating = skl_init_clock_gating;
7594 	else if (IS_KABYLAKE(dev_priv))
7595 		dev_priv->display.init_clock_gating = kbl_init_clock_gating;
7596 	else if (IS_BROXTON(dev_priv))
7597 		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
7598 	else if (IS_GEMINILAKE(dev_priv))
7599 		dev_priv->display.init_clock_gating = glk_init_clock_gating;
7600 	else if (IS_BROADWELL(dev_priv))
7601 		dev_priv->display.init_clock_gating = bdw_init_clock_gating;
7602 	else if (IS_CHERRYVIEW(dev_priv))
7603 		dev_priv->display.init_clock_gating = chv_init_clock_gating;
7604 	else if (IS_HASWELL(dev_priv))
7605 		dev_priv->display.init_clock_gating = hsw_init_clock_gating;
7606 	else if (IS_IVYBRIDGE(dev_priv))
7607 		dev_priv->display.init_clock_gating = ivb_init_clock_gating;
7608 	else if (IS_VALLEYVIEW(dev_priv))
7609 		dev_priv->display.init_clock_gating = vlv_init_clock_gating;
7610 	else if (IS_GEN(dev_priv, 6))
7611 		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
7612 	else if (IS_GEN(dev_priv, 5))
7613 		dev_priv->display.init_clock_gating = ilk_init_clock_gating;
7614 	else if (IS_G4X(dev_priv))
7615 		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
7616 	else if (IS_I965GM(dev_priv))
7617 		dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
7618 	else if (IS_I965G(dev_priv))
7619 		dev_priv->display.init_clock_gating = i965g_init_clock_gating;
7620 	else if (IS_GEN(dev_priv, 3))
7621 		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7622 	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
7623 		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
7624 	else if (IS_GEN(dev_priv, 2))
7625 		dev_priv->display.init_clock_gating = i830_init_clock_gating;
7626 	else {
7627 		MISSING_CASE(INTEL_DEVID(dev_priv));
7628 		dev_priv->display.init_clock_gating = nop_init_clock_gating;
7629 	}
7630 }
7631 
7632 /* Set up chip specific power management-related functions */
7633 void intel_init_pm(struct drm_i915_private *dev_priv)
7634 {
7635 	/* For cxsr */
7636 	if (IS_PINEVIEW(dev_priv))
7637 		pnv_get_mem_freq(dev_priv);
7638 	else if (IS_GEN(dev_priv, 5))
7639 		ilk_get_mem_freq(dev_priv);
7640 
7641 	if (intel_has_sagv(dev_priv))
7642 		skl_setup_sagv_block_time(dev_priv);
7643 
7644 	/* For FIFO watermark updates */
7645 	if (INTEL_GEN(dev_priv) >= 9) {
7646 		skl_setup_wm_latency(dev_priv);
7647 		dev_priv->display.compute_global_watermarks = skl_compute_wm;
7648 	} else if (HAS_PCH_SPLIT(dev_priv)) {
7649 		ilk_setup_wm_latency(dev_priv);
7650 
7651 		if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
7652 		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
7653 		    (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
7654 		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
7655 			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
7656 			dev_priv->display.compute_intermediate_wm =
7657 				ilk_compute_intermediate_wm;
7658 			dev_priv->display.initial_watermarks =
7659 				ilk_initial_watermarks;
7660 			dev_priv->display.optimize_watermarks =
7661 				ilk_optimize_watermarks;
7662 		} else {
7663 			drm_dbg_kms(&dev_priv->drm,
7664 				    "Failed to read display plane latency. "
7665 				    "Disabling CxSR\n");
7666 		}
7667 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
7668 		vlv_setup_wm_latency(dev_priv);
7669 		dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
7670 		dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
7671 		dev_priv->display.initial_watermarks = vlv_initial_watermarks;
7672 		dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
7673 		dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
7674 	} else if (IS_G4X(dev_priv)) {
7675 		g4x_setup_wm_latency(dev_priv);
7676 		dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
7677 		dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
7678 		dev_priv->display.initial_watermarks = g4x_initial_watermarks;
7679 		dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
7680 	} else if (IS_PINEVIEW(dev_priv)) {
7681 		if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
7682 					    dev_priv->is_ddr3,
7683 					    dev_priv->fsb_freq,
7684 					    dev_priv->mem_freq)) {
7685 			drm_info(&dev_priv->drm,
7686 				 "failed to find known CxSR latency "
7687 				 "(found ddr%s fsb freq %d, mem freq %d), "
7688 				 "disabling CxSR\n",
7689 				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
7690 				 dev_priv->fsb_freq, dev_priv->mem_freq);
7691 			/* Disable CxSR and never update its watermark again */
7692 			intel_set_memory_cxsr(dev_priv, false);
7693 			dev_priv->display.update_wm = NULL;
7694 		} else
7695 			dev_priv->display.update_wm = pnv_update_wm;
7696 	} else if (IS_GEN(dev_priv, 4)) {
7697 		dev_priv->display.update_wm = i965_update_wm;
7698 	} else if (IS_GEN(dev_priv, 3)) {
7699 		dev_priv->display.update_wm = i9xx_update_wm;
7700 		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
7701 	} else if (IS_GEN(dev_priv, 2)) {
7702 		if (INTEL_NUM_PIPES(dev_priv) == 1) {
7703 			dev_priv->display.update_wm = i845_update_wm;
7704 			dev_priv->display.get_fifo_size = i845_get_fifo_size;
7705 		} else {
7706 			dev_priv->display.update_wm = i9xx_update_wm;
7707 			dev_priv->display.get_fifo_size = i830_get_fifo_size;
7708 		}
7709 	} else {
7710 		drm_err(&dev_priv->drm,
7711 			"unexpected fall-through in %s\n", __func__);
7712 	}
7713 }
7714 
7715 void intel_pm_setup(struct drm_i915_private *dev_priv)
7716 {
7717 	dev_priv->runtime_pm.suspended = false;
7718 	atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
7719 }
7720 
7721 static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
7722 {
7723 	struct intel_dbuf_state *dbuf_state;
7724 
7725 	dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
7726 	if (!dbuf_state)
7727 		return NULL;
7728 
7729 	return &dbuf_state->base;
7730 }
7731 
7732 static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
7733 				     struct intel_global_state *state)
7734 {
7735 	kfree(state);
7736 }
7737 
7738 static const struct intel_global_state_funcs intel_dbuf_funcs = {
7739 	.atomic_duplicate_state = intel_dbuf_duplicate_state,
7740 	.atomic_destroy_state = intel_dbuf_destroy_state,
7741 };
7742 
7743 struct intel_dbuf_state *
7744 intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
7745 {
7746 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7747 	struct intel_global_state *dbuf_state;
7748 
7749 	dbuf_state = intel_atomic_get_global_obj_state(state, &dev_priv->dbuf.obj);
7750 	if (IS_ERR(dbuf_state))
7751 		return ERR_CAST(dbuf_state);
7752 
7753 	return to_intel_dbuf_state(dbuf_state);
7754 }
7755 
7756 int intel_dbuf_init(struct drm_i915_private *dev_priv)
7757 {
7758 	struct intel_dbuf_state *dbuf_state;
7759 
7760 	dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
7761 	if (!dbuf_state)
7762 		return -ENOMEM;
7763 
7764 	intel_atomic_global_obj_init(dev_priv, &dev_priv->dbuf.obj,
7765 				     &dbuf_state->base, &intel_dbuf_funcs);
7766 
7767 	return 0;
7768 }
7769 
7770 void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
7771 {
7772 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7773 	const struct intel_dbuf_state *new_dbuf_state =
7774 		intel_atomic_get_new_dbuf_state(state);
7775 	const struct intel_dbuf_state *old_dbuf_state =
7776 		intel_atomic_get_old_dbuf_state(state);
7777 
7778 	if (!new_dbuf_state ||
7779 	    new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
7780 		return;
7781 
7782 	WARN_ON(!new_dbuf_state->base.changed);
7783 
7784 	gen9_dbuf_slices_update(dev_priv,
7785 				old_dbuf_state->enabled_slices |
7786 				new_dbuf_state->enabled_slices);
7787 }
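
/*
 * Note the asymmetry with intel_dbuf_post_plane_update() below: before
 * the planes are updated we enable the union of the old and new slice
 * sets, so every slice either state needs stays powered across the
 * transition; only afterwards do we trim down to exactly the new set.
 */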
7788 
7789 void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
7790 {
7791 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7792 	const struct intel_dbuf_state *new_dbuf_state =
7793 		intel_atomic_get_new_dbuf_state(state);
7794 	const struct intel_dbuf_state *old_dbuf_state =
7795 		intel_atomic_get_old_dbuf_state(state);
7796 
7797 	if (!new_dbuf_state ||
7798 	    new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
7799 		return;
7800 
7801 	WARN_ON(!new_dbuf_state->base.changed);
7802 
7803 	gen9_dbuf_slices_update(dev_priv,
7804 				new_dbuf_state->enabled_slices);
7805 }
7806