// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"

/*
 * The hardware phase 0.0 refers to the center of the pixel.
 * We want to start from the top/left edge which is phase
 * -0.5. That matches how the hardware calculates the scaling
 * factors (from top-left of the first pixel to bottom-right
 * of the last pixel, as opposed to the pixel centers).
 *
 * For 4:2:0 subsampled chroma planes we obviously have to
 * adjust that so that the chroma sample position lands in
 * the right spot.
 *
 * Note that for packed YCbCr 4:2:2 formats there is no way to
 * control chroma siting. The hardware simply replicates the
 * chroma samples for both of the luma samples, and thus we don't
 * actually get the expected MPEG2 chroma siting convention :(
 * The same behaviour is observed on pre-SKL platforms as well.
 *
 * Theory behind the formula (note that we ignore sub-pixel
 * source coordinates):
 * s = source sample position
 * d = destination sample position
 *
 * Downscaling 4:1:
 * -0.5
 *  | 0.0
 *  | |     1.5 (initial phase)
 *  | |      |
 *  v v      v
 *  | s | s | s | s |
 *  |       d       |
 *
 * Upscaling 1:4:
 * -0.5
 *  | -0.375 (initial phase)
 *  | |     0.0
 *  | |      |
 *  v v      v
 *  |       s       |
 *  | d | d | d | d |
 */
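/*
 * Note: both the phase and the scale factor below are .16 fixed point
 * (0x10000 == 1.0, -0x8000 == -0.5). E.g. for the 4:1 downscaling case
 * above (sub == 1, scale == 0x40000) the initial phase works out to
 * -0x8000 + 0x40000 / 2 == 0x18000 == 1.5, and for the 1:4 upscaling
 * case (scale == 0x4000) to -0x8000 + 0x4000 / 2 == -0x6000 == -0.375,
 * matching the diagrams.
 */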
static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
{
	int phase = -0x8000;
	u16 trip = 0;

	if (chroma_cosited)
		phase += (sub - 1) * 0x8000 / sub;

	phase += scale / (2 * sub);

	/*
	 * Hardware initial phase limited to [-0.5:1.5].
	 * Since the max hardware scale factor is 3.0, we
	 * should never actually exceed 1.0 here.
	 */
	WARN_ON(phase < -0x8000 || phase > 0x18000);

	if (phase < 0)
		phase = 0x10000 + phase;
	else
		trip = PS_PHASE_TRIP;

	return ((phase >> 2) & PS_PHASE_MASK) | trip;
}

#define SKL_MIN_SRC_W 8
#define SKL_MAX_SRC_W 4096
#define SKL_MIN_SRC_H 8
#define SKL_MAX_SRC_H 4096
#define SKL_MIN_DST_W 8
#define SKL_MAX_DST_W 4096
#define SKL_MIN_DST_H 8
#define SKL_MAX_DST_H 4096
#define ICL_MAX_SRC_W 5120
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
#define MTL_MAX_SRC_W 4096
#define MTL_MAX_SRC_H 8192
#define MTL_MAX_DST_W 8192
#define MTL_MAX_DST_H 8192
#define SKL_MIN_YUV_420_SRC_W 16
#define SKL_MIN_YUV_420_SRC_H 16

static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format,
		  u64 modifier, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
	int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
	int min_src_w, min_src_h, min_dst_w, min_dst_h;
	int max_src_w, max_src_h, max_dst_w, max_dst_h;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 && crtc_state->hw.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * If the plane is being disabled, the scaler is no longer required,
	 * or this is a forced detach:
	 * - free the scaler bound to this plane/crtc
	 * - to do this, update scaler_state->scaler_users
	 *
	 * Here the scaler state in crtc_state is marked free so that the
	 * scaler can be assigned to another user. The actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			drm_dbg_kms(&dev_priv->drm,
				    "scaler_user index %u.%u: "
				    "Staged freeing scaler id %d scaler_users = 0x%x\n",
				    crtc->pipe, scaler_user, *scaler_id,
				    scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	min_src_w = SKL_MIN_SRC_W;
	min_src_h = SKL_MIN_SRC_H;
	min_dst_w = SKL_MIN_DST_W;
	min_dst_h = SKL_MIN_DST_H;

	if (DISPLAY_VER(dev_priv) < 11) {
		max_src_w = SKL_MAX_SRC_W;
		max_src_h = SKL_MAX_SRC_H;
		max_dst_w = SKL_MAX_DST_W;
		max_dst_h = SKL_MAX_DST_H;
	} else if (DISPLAY_VER(dev_priv) < 14) {
		max_src_w = ICL_MAX_SRC_W;
		max_src_h = ICL_MAX_SRC_H;
		max_dst_w = ICL_MAX_DST_W;
		max_dst_h = ICL_MAX_DST_H;
	} else {
		max_src_w = MTL_MAX_SRC_W;
		max_src_h = MTL_MAX_SRC_H;
		max_dst_w = MTL_MAX_DST_W;
		max_dst_h = MTL_MAX_DST_H;
	}

	/* range checks */
	if (src_w < min_src_w || src_h < min_src_h ||
	    dst_w < min_dst_w || dst_h < min_dst_h ||
	    src_w > max_src_w || src_h > max_src_h ||
	    dst_w > max_dst_w || dst_h > max_dst_h) {
		drm_dbg_kms(&dev_priv->drm,
			    "scaler_user index %u.%u: src %ux%u dst %ux%u "
			    "size is out of scaler range\n",
			    crtc->pipe, scaler_user, src_w, src_h,
			    dst_w, dst_h);
		return -EINVAL;
	}

	/*
	 * The pipe scaler does not use all the bits of PIPESRC, at least
	 * on the earlier platforms. So even when we're scaling a plane
	 * the *pipe* source size must not be too large. For simplicity
	 * we assume the limits match the scaler source size limits. Might
	 * not be 100% accurate on all platforms, but good enough for now.
	 */
	if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) {
		drm_dbg_kms(&dev_priv->drm,
			    "scaler_user index %u.%u: pipe src size %ux%u "
			    "is out of scaler range\n",
			    crtc->pipe, scaler_user, pipe_src_w, pipe_src_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
		    "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		    crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		    scaler_state->scaler_users);

	return 0;
}

int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	int width, height;

	if (crtc_state->pch_pfit.enabled) {
		width = drm_rect_width(&crtc_state->pch_pfit.dst);
		height = drm_rect_height(&crtc_state->pch_pfit.dst);
	} else {
		width = pipe_mode->crtc_hdisplay;
		height = pipe_mode->crtc_vdisplay;
	}
	return skl_update_scaler(crtc_state, !crtc_state->hw.active,
				 SKL_CRTC_INDEX,
				 &crtc_state->scaler_state.scaler_id,
				 drm_rect_width(&crtc_state->pipe_src),
				 drm_rect_height(&crtc_state->pipe_src),
				 width, height, NULL, 0,
				 crtc_state->pch_pfit.enabled);
}

/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc's scaler state
 * @plane_state: atomic plane state to update
 *
 * Return:
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
			    struct intel_plane_state *plane_state)
{
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;
	bool force_detach = !fb || !plane_state->uapi.visible;
	bool need_scaler = false;

	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
	    fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
		need_scaler = true;

	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				drm_rect_width(&plane_state->uapi.src) >> 16,
				drm_rect_height(&plane_state->uapi.src) >> 16,
				drm_rect_width(&plane_state->uapi.dst),
				drm_rect_height(&plane_state->uapi.dst),
				fb ? fb->format : NULL,
				fb ? fb->modifier : 0,
				need_scaler);

	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags) {
		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] scaling with color key not allowed",
			    intel_plane->base.base.id,
			    intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_XYUV8888:
	case DRM_FORMAT_P010:
	case DRM_FORMAT_P012:
	case DRM_FORMAT_P016:
	case DRM_FORMAT_Y210:
	case DRM_FORMAT_Y212:
	case DRM_FORMAT_Y216:
	case DRM_FORMAT_XVYU2101010:
	case DRM_FORMAT_XVYU12_16161616:
	case DRM_FORMAT_XVYU16161616:
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		if (DISPLAY_VER(dev_priv) >= 11)
			break;
		fallthrough;
	default:
		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			    intel_plane->base.base.id, intel_plane->base.name,
			    fb->base.id, fb->format->format);
		return -EINVAL;
	}

	return 0;
}

static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
				     int num_scalers_need, struct intel_crtc *intel_crtc,
				     const char *name, int idx,
				     struct intel_plane_state *plane_state,
				     int *scaler_id)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	int j;
	u32 mode;

	if (*scaler_id < 0) {
		/* find a free scaler */
		for (j = 0; j < intel_crtc->num_scalers; j++) {
			if (scaler_state->scalers[j].in_use)
				continue;

			*scaler_id = j;
			scaler_state->scalers[*scaler_id].in_use = 1;
			break;
		}
	}

	if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
		     "Cannot find scaler for %s:%d\n", name, idx))
		return -EINVAL;

	/* set scaler mode */
	if (plane_state && plane_state->hw.fb &&
	    plane_state->hw.fb->format->is_yuv &&
	    plane_state->hw.fb->format->num_planes > 1) {
		struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

		if (DISPLAY_VER(dev_priv) == 9) {
			mode = SKL_PS_SCALER_MODE_NV12;
		} else if (icl_is_hdr_plane(dev_priv, plane->id)) {
			/*
			 * On gen11+'s HDR planes we only use the scaler for
			 * scaling. They have a dedicated chroma upsampler, so
			 * we don't need the scaler to upsample the UV plane.
			 */
			mode = PS_SCALER_MODE_NORMAL;
		} else {
			struct intel_plane *linked =
				plane_state->planar_linked_plane;

			mode = PS_SCALER_MODE_PLANAR;

			if (linked)
				mode |= PS_PLANE_Y_SEL(linked->id);
		}
	} else if (DISPLAY_VER(dev_priv) >= 10) {
		mode = PS_SCALER_MODE_NORMAL;
	} else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
		/*
		 * When only 1 scaler is in use on a pipe with 2 scalers,
		 * scaler 0 operates in high quality (HQ) mode.
		 * In this case use scaler 0 to take advantage of HQ mode.
		 */
		scaler_state->scalers[*scaler_id].in_use = 0;
		*scaler_id = 0;
		scaler_state->scalers[0].in_use = 1;
		mode = SKL_PS_SCALER_MODE_HQ;
	} else {
		mode = SKL_PS_SCALER_MODE_DYN;
	}

	/*
	 * FIXME: we should also check the scaler factors for pfit, so
	 * this shouldn't be tied directly to planes.
	 */
	if (plane_state && plane_state->hw.fb) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		const struct drm_rect *src = &plane_state->uapi.src;
		const struct drm_rect *dst = &plane_state->uapi.dst;
		int hscale, vscale, max_vscale, max_hscale;

		/*
		 * FIXME: When two scalers are needed, but only one of
		 * them needs to downscale, we should make sure that
		 * the one that needs downscaling support is assigned
		 * as the first scaler, so we don't reject downscaling
		 * unnecessarily.
		 */

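		/*
		 * Note: the scale factor limits below are .16 fixed point,
		 * so 0x10000 is 1.0 and 0x30000 - 1 is just under 3.0.
		 */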
		if (DISPLAY_VER(dev_priv) >= 14) {
			/*
			 * On versions 14 and up, only the first
			 * scaler supports a vertical scaling factor
			 * of more than 1.0, while a horizontal
			 * scaling factor of 3.0 is supported.
			 */
			max_hscale = 0x30000 - 1;
			if (*scaler_id == 0)
				max_vscale = 0x30000 - 1;
			else
				max_vscale = 0x10000;

		} else if (DISPLAY_VER(dev_priv) >= 10 ||
			   !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
			max_hscale = 0x30000 - 1;
			max_vscale = 0x30000 - 1;
		} else {
			max_hscale = 0x20000 - 1;
			max_vscale = 0x20000 - 1;
		}

		/*
		 * FIXME: We should change the if-else block above to
		 * support HQ vs dynamic scaler properly.
		 */

		/* Check if required scaling is within limits */
		hscale = drm_rect_calc_hscale(src, dst, 1, max_hscale);
		vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale);

		if (hscale < 0 || vscale < 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "Scaler %d doesn't support required plane scaling\n",
				    *scaler_id);
			drm_rect_debug_print("src: ", src, true);
			drm_rect_debug_print("dst: ", dst, false);

			return -EINVAL;
		}
	}

	drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
		    intel_crtc->pipe, *scaler_id, name, idx);
	scaler_state->scalers[*scaler_id].mode = mode;

	return 0;
}

/**
 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
 * @dev_priv: i915 device
 * @intel_crtc: intel crtc
 * @crtc_state: incoming crtc_state to validate and setup scalers
 *
 * This function sets up scalers based on staged scaling requests for
 * a @crtc and its planes. It is called from crtc level check path. If request
 * is a supportable request, it attaches scalers to requested planes and crtc.
 *
 * This function takes into account the current scaler(s) in use by any planes
 * not being part of this atomic state
 *
 * Returns:
 *         0 - scalers were setup successfully
 *         error code - otherwise
 */
int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
			       struct intel_crtc *intel_crtc,
			       struct intel_crtc_state *crtc_state)
{
	struct drm_plane *plane = NULL;
	struct intel_plane *intel_plane;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct drm_atomic_state *drm_state = crtc_state->uapi.state;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
	int num_scalers_need;
	int i;

	num_scalers_need = hweight32(scaler_state->scaler_users);

	/*
	 * High level flow:
	 * - staged scaler requests are already in scaler_state->scaler_users
	 * - check whether staged scaling requests can be supported
	 * - add planes using scalers that aren't in current transaction
	 * - assign scalers to requested users
	 * - as part of plane commit, scalers will be committed
	 *   (i.e., either attached or detached) to respective planes in hw
	 * - as part of crtc_commit, scaler will be either attached or detached
	 *   to crtc in hw
	 */

	/* fail if required scalers > available scalers */
	if (num_scalers_need > intel_crtc->num_scalers) {
		drm_dbg_kms(&dev_priv->drm,
			    "Too many scaling requests %d > %d\n",
			    num_scalers_need, intel_crtc->num_scalers);
		return -EINVAL;
	}

	/* walkthrough scaler_users bits and start assigning scalers */
	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
		struct intel_plane_state *plane_state = NULL;
		int *scaler_id;
		const char *name;
		int idx, ret;

		/* skip if scaler not required */
		if (!(scaler_state->scaler_users & (1 << i)))
			continue;

		if (i == SKL_CRTC_INDEX) {
			name = "CRTC";
			idx = intel_crtc->base.base.id;

			/* panel fitter case: assign as a crtc scaler */
			scaler_id = &scaler_state->scaler_id;
		} else {
			name = "PLANE";

			/* plane scaler case: assign as a plane scaler */
			/* find the plane that set the bit as scaler_user */
			plane = drm_state->planes[i].ptr;

			/*
			 * to enable/disable hq mode, add planes that are using scaler
			 * into this transaction
			 */
			if (!plane) {
				struct drm_plane_state *state;

				/*
				 * GLK+ scalers don't have a HQ mode so it
				 * isn't necessary to change between HQ and dyn mode
				 * on those platforms.
				 */
				if (DISPLAY_VER(dev_priv) >= 10)
					continue;

				plane = drm_plane_from_index(&dev_priv->drm, i);
				state = drm_atomic_get_plane_state(drm_state, plane);
				if (IS_ERR(state)) {
					drm_dbg_kms(&dev_priv->drm,
						    "Failed to add [PLANE:%d] to drm_state\n",
						    plane->base.id);
					return PTR_ERR(state);
				}
			}

			intel_plane = to_intel_plane(plane);
			idx = plane->base.id;

			/* plane on different crtc cannot be a scaler user of this crtc */
			if (drm_WARN_ON(&dev_priv->drm,
					intel_plane->pipe != intel_crtc->pipe))
				continue;

			plane_state = intel_atomic_get_new_plane_state(intel_state,
								       intel_plane);
			scaler_id = &plane_state->scaler_id;
		}

		ret = intel_atomic_setup_scaler(scaler_state, num_scalers_need,
						intel_crtc, name, idx,
						plane_state, scaler_id);
		if (ret < 0)
			return ret;
	}

	return 0;
}

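/*
 * The hardware stores 17 phases of 7 filter taps (see the note further
 * below); glk_coef_tap() maps a linear coefficient index onto the tap
 * position (0-6) within its phase.
 */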
static int glk_coef_tap(int i)
{
	return i % 7;
}

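/*
 * For nearest-neighbor scaling the center tap (D, tap index 3) is
 * programmed to 1 and every other tap to 0, with 0x0800 and 0x3000
 * being the corresponding SCALER_COEFFICIENT_FORMAT encodings (see
 * the note below).
 */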
static u16 glk_nearest_filter_coef(int t)
{
	return t == 3 ? 0x0800 : 0x3000;
}

/*
 * Theory behind setting nearest-neighbor integer scaling:
 *
 * 17 phases of 7 taps require 119 coefficients in 60 dwords per set.
 * The letter represents the filter tap (D is the center tap) and the number
 * represents the coefficient set for a phase (0-16).
 *
 * +------------+--------------------------+--------------------------+
 * |Index value | Data value coefficient 1 | Data value coefficient 2 |
 * +------------+--------------------------+--------------------------+
 * |   00h      |            B0            |            A0            |
 * +------------+--------------------------+--------------------------+
 * |   01h      |            D0            |            C0            |
 * +------------+--------------------------+--------------------------+
 * |   02h      |            F0            |            E0            |
 * +------------+--------------------------+--------------------------+
 * |   03h      |            A1            |            G0            |
 * +------------+--------------------------+--------------------------+
 * |   04h      |            C1            |            B1            |
 * +------------+--------------------------+--------------------------+
 * |   ...      |            ...           |            ...           |
 * +------------+--------------------------+--------------------------+
 * |   38h      |            B16           |            A16           |
 * +------------+--------------------------+--------------------------+
 * |   39h      |            D16           |            C16           |
 * +------------+--------------------------+--------------------------+
 * |   3Ah      |            F16           |            E16           |
 * +------------+--------------------------+--------------------------+
 * |   3Bh      |          Reserved        |            G16           |
 * +------------+--------------------------+--------------------------+
 *
 * To enable nearest-neighbor scaling: program the scaler coefficients with
 * the center tap (Dxx) values set to 1 and all other values set to 0 as per
 * SCALER_COEFFICIENT_FORMAT.
 */

static void glk_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
					     enum pipe pipe, int id, int set)
{
	int i;

	intel_de_write_fw(dev_priv, GLK_PS_COEF_INDEX_SET(pipe, id, set),
			  PS_COEE_INDEX_AUTO_INC);

	for (i = 0; i < 17 * 7; i += 2) {
		u32 tmp;
		int t;

		t = glk_coef_tap(i);
		tmp = glk_nearest_filter_coef(t);

		t = glk_coef_tap(i + 1);
		tmp |= glk_nearest_filter_coef(t) << 16;

		intel_de_write_fw(dev_priv, GLK_PS_COEF_DATA_SET(pipe, id, set),
				  tmp);
	}

	intel_de_write_fw(dev_priv, GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
}

static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
{
	if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
		return (PS_FILTER_PROGRAMMED |
			PS_Y_VERT_FILTER_SELECT(set) |
			PS_Y_HORZ_FILTER_SELECT(set) |
			PS_UV_VERT_FILTER_SELECT(set) |
			PS_UV_HORZ_FILTER_SELECT(set));
	}

	return PS_FILTER_MEDIUM;
}

static void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
				    int id, int set, enum drm_scaling_filter filter)
{
	switch (filter) {
	case DRM_SCALING_FILTER_DEFAULT:
		break;
	case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
		glk_program_nearest_filter_coefs(dev_priv, pipe, id, set);
		break;
	default:
		MISSING_CASE(filter);
	}
}

void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	u16 uv_rgb_hphase, uv_rgb_vphase;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;
	int hscale, vscale;
	struct drm_rect src;
	int id;
	u32 ps_ctrl;

	if (!crtc_state->pch_pfit.enabled)
		return;

	if (drm_WARN_ON(&dev_priv->drm,
			crtc_state->scaler_state.scaler_id < 0))
		return;

	drm_rect_init(&src, 0, 0,
		      drm_rect_width(&crtc_state->pipe_src) << 16,
		      drm_rect_height(&crtc_state->pipe_src) << 16);

	hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
	vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);

	uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
	uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

	id = scaler_state->scaler_id;

	ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
	ps_ctrl |= PS_SCALER_EN | scaler_state->scalers[id].mode;

	skl_scaler_setup_filter(dev_priv, pipe, id, 0,
				crtc_state->hw.scaling_filter);

	intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl);

	intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
	intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
			  x << 16 | y);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
			  width << 16 | height);
}

void
skl_program_plane_scaler(struct intel_plane *plane,
			 const struct intel_crtc_state *crtc_state,
			 const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	enum pipe pipe = plane->pipe;
	int scaler_id = plane_state->scaler_id;
	const struct intel_scaler *scaler =
		&crtc_state->scaler_state.scalers[scaler_id];
	int crtc_x = plane_state->uapi.dst.x1;
	int crtc_y = plane_state->uapi.dst.y1;
	u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
	u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
	u16 y_hphase, uv_rgb_hphase;
	u16 y_vphase, uv_rgb_vphase;
	int hscale, vscale;
	u32 ps_ctrl;

	hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
				      &plane_state->uapi.dst,
				      0, INT_MAX);
	vscale = drm_rect_calc_vscale(&plane_state->uapi.src,
				      &plane_state->uapi.dst,
				      0, INT_MAX);

	/* TODO: handle sub-pixel coordinates */
	if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
	    !icl_is_hdr_plane(dev_priv, plane->id)) {
		y_hphase = skl_scaler_calc_phase(1, hscale, false);
		y_vphase = skl_scaler_calc_phase(1, vscale, false);

		/* MPEG2 chroma siting convention */
		uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
		uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
	} else {
		/* not used */
		y_hphase = 0;
		y_vphase = 0;

		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
	}

	ps_ctrl = skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
	ps_ctrl |= PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode;

	skl_scaler_setup_filter(dev_priv, pipe, scaler_id, 0,
				plane_state->hw.scaling_filter);

	intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
	intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id),
			  PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
	intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id),
			  PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id),
			  (crtc_x << 16) | crtc_y);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id),
			  (crtc_w << 16) | crtc_h);
}

static void skl_detach_scaler(struct intel_crtc *crtc, int id)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_de_write_fw(dev_priv, SKL_PS_CTRL(crtc->pipe, id), 0);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(crtc->pipe, id), 0);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
}

/*
 * This function detaches (aka. unbinds) unused scalers in hardware
 */
void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	int i;

	/* loop through and disable scalers that aren't in use */
	for (i = 0; i < crtc->num_scalers; i++) {
		if (!scaler_state->scalers[i].in_use)
			skl_detach_scaler(crtc, i);
	}
}

void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	int i;

	for (i = 0; i < crtc->num_scalers; i++)
		skl_detach_scaler(crtc, i);
}