// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/bits.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

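/*
 * Truncate a DRM CTM coefficient (S31.32 sign-magnitude fixed point) to
 * the 18-bit S3.15-style value the DPU PCC block expects: drop the sign
 * bit, shift the 32-bit fraction down to 15 bits and keep the low 18
 * bits. For example, 1.0 (0x1_0000_0000) maps to 0x8000.
 */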
#define	CONVERT_S3_15(val) \
	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))

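/**
 * _dpu_crtc_get_kms - retrieve the dpu_kms instance that owns this crtc
 * @crtc: Pointer to drm crtc structure
 */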
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	if (!crtc)
		return;

	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

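/**
 * get_encoder_from_crtc - find the first encoder currently driving a crtc
 * @crtc: Pointer to drm crtc structure
 *
 * Returns the first attached encoder, or NULL if none is attached.
 */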
static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}

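/**
 * dpu_crtc_get_vblank_counter - hardware frame counter for this crtc
 * @crtc: Pointer to drm crtc structure
 *
 * Returns the frame count from the attached encoder, or 0 if no encoder
 * is attached.
 */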
static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
		return 0;
	}

	return dpu_encoder_get_frame_count(encoder);
}

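/**
 * dpu_crtc_get_scanout_position - sample the current scanout line
 * @crtc: Pointer to drm crtc structure
 * @in_vblank_irq: True if called from the vblank irq handler
 * @vpos: Returned vertical position, relative to the start of active area
 * @hpos: Returned horizontal position (always reported as 0)
 * @stime: Optional timestamp taken before sampling the line counter
 * @etime: Optional timestamp taken after sampling the line counter
 * @mode: Display mode used to translate the raw line counter
 */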
static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
					   bool in_vblank_irq,
					   int *vpos, int *hpos,
					   ktime_t *stime, ktime_t *etime,
					   const struct drm_display_mode *mode)
{
	unsigned int pipe = crtc->index;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * The line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */
	vactive_start = vsw + vbp + 1;
	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;

	if (stime)
		*stime = ktime_get();

	line = dpu_encoder_get_linecount(encoder);

	if (line < vactive_start)
		line -= vactive_start;
	else if (line > vactive_end)
		line = line - vfp_end - vactive_start;
	else
		line -= vactive_start;

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}

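/**
 * _dpu_crtc_setup_blend_cfg - program one mixer's blend stage for a plane
 * @mixer: Mixer to program
 * @pstate: Plane state providing the alpha and pixel blend mode
 * @format: Format of the plane's framebuffer, used to check alpha support
 *
 * Maps the DRM blend properties (plane alpha plus "None", "Pre-multiplied"
 * or "Coverage" pixel blend mode) onto the DPU foreground/background blend
 * constants.
 */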
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	uint32_t fg_alpha, bg_alpha;

	fg_alpha = pstate->base.alpha >> 8;
	bg_alpha = 0xff - fg_alpha;

	/* default to opaque blending */
	if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
	    !format->alpha_enable) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_BG_CONST;
	} else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != 0xff) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_BG_MOD_ALPHA |
				    DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	} else {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != 0xff) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_FG_MOD_ALPHA |
				    DPU_BLEND_FG_INV_MOD_ALPHA |
				    DPU_BLEND_BG_MOD_ALPHA |
				    DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				fg_alpha, bg_alpha, blend_op);

	DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
		  &format->base.pixel_format, format->alpha_enable, blend_op);
}

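/**
 * _dpu_crtc_program_lm_output_roi - set each layer mixer's output rectangle
 * @crtc: Pointer to drm crtc structure
 */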
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

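/**
 * _dpu_crtc_blend_setup_mixer - stage all visible planes on the mixers
 * @crtc: Pointer to drm crtc structure
 * @dpu_crtc: Pointer to dpu crtc structure
 * @mixer: Array of mixers attached to this crtc
 *
 * Walks the planes attached to the crtc, fills in the stage configuration
 * and per-mixer blend/flush state, and marks the active pipes in the CTL.
 */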
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;
	DECLARE_BITMAP(fetch_active, SSPP_MAX);

	memset(fetch_active, 0, sizeof(fetch_active));
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		if (!state->visible)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
		set_bit(dpu_plane_pipe(plane), fetch_active);

		DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	if (ctl->ops.set_active_pipes)
		ctl->ops.set_active_pipes(ctl, fetch_active);

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * @crtc: Pointer to drm crtc structure
 *
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events that were requested through
 * DRM_IOCTL_MODE_PAGE_FLIP and are cached in dpu_crtc->event.
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode.
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

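/**
 * dpu_crtc_frame_event_work - deferred handler for encoder frame events
 * @work: kthread work item embedded in a struct dpu_crtc_frame_event
 *
 * Runs in the per-crtc event thread: releases bandwidth once the last
 * pending frame completes, reports panel-dead events, and recycles the
 * event slot back onto the free list.
 */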
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API to encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. Encoder may call different
 * events from different context - IRQ, user thread, commit_thread, etc.
 * Each event should be carefully reviewed and should be processed in the
 * proper task context to avoid scheduling delay or to properly manage the
 * irq context's bottom half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
	dpu_core_perf_crtc_update(crtc, 0, false);
	_dpu_crtc_complete_flip(crtc);
}

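/**
 * _dpu_crtc_setup_lm_bounds - split the mode horizontally across mixers
 * @crtc: Pointer to drm crtc structure
 * @state: CRTC state whose adjusted mode is being carved up
 *
 * Each of the crtc's layer mixers gets an equal-width vertical strip of
 * the adjusted display mode.
 */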
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}
}

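/**
 * _dpu_crtc_get_pcc_coeff - translate the DRM CTM blob into a PCC config
 * @state: CRTC state carrying the color transformation matrix blob
 * @cfg: Polynomial color correction config to fill in
 *
 * The 3x3 drm_color_ctm matrix is stored row by row, so matrix[0..2] are
 * the red/green/blue contributions to the red output channel, and so on.
 */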
static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
		struct dpu_hw_pcc_cfg *cfg)
{
	struct drm_color_ctm *ctm;

	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));

	ctm = (struct drm_color_ctm *)state->ctm->data;

	if (!ctm)
		return;

	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);

	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);

	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
}

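/**
 * _dpu_crtc_setup_cp_blocks - push color processing state to the DSPPs
 * @crtc: Pointer to drm crtc structure
 *
 * Programs (or clears) the PCC block on every DSPP attached to the crtc
 * whenever the CTM property changed, and queues the matching flush bits.
 */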
static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state = crtc->state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_pcc_cfg cfg;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_dspp *dspp;
	int i;

	if (!state->color_mgmt_changed)
		return;

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		dspp = mixer[i].hw_dspp;

		if (!dspp || !dspp->ops.setup_pcc)
			continue;

		if (!state->ctm) {
			dspp->ops.setup_pcc(dspp, NULL);
		} else {
			_dpu_crtc_get_pcc_coeff(state, &cfg);
			dspp->ops.setup_pcc(dspp, &cfg);
		}

		mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
			mixer[i].hw_dspp->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - DSPP_0,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);
	}
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	_dpu_crtc_setup_cp_blocks(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	WARN_ON(dpu_crtc->event);
	spin_lock_irqsave(&dev->event_lock, flags);
	dpu_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 *                      required writes/flushing before crtc's "flush
	 *                      everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

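/**
 * _dpu_crtc_wait_for_frame_done - block until outstanding frames complete
 * @crtc: Pointer to drm crtc structure
 *
 * Waits up to DPU_CRTC_FRAME_DONE_TIMEOUT_MS for the frame-done completion
 * when frames are still pending. Returns 0 on success or when nothing is
 * pending, -ETIMEDOUT on timeout.
 */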
static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DRM_DEBUG_ATOMIC("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
	} else {
		DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);
	}

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	if (cstate)
		__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);

	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;
	unsigned long flags;
	bool release_bandwidth = false;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;
		dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				     atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *encoder;
	bool request_bandwidth = false;

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate)
{
	struct drm_crtc *crtc = cstate->crtc;
	struct drm_encoder *encoder;

	drm_for_each_encoder_mask(encoder, crtc->dev, cstate->encoder_mask) {
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_CMD)
			return true;
	}

	return false;
}

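/**
 * dpu_crtc_atomic_check - validate a proposed crtc state
 * @crtc: Pointer to drm crtc structure
 * @state: Atomic state containing the new crtc state to validate
 *
 * Checks plane visibility and destination rectangles, multirect usage,
 * per-stage plane counts, source-split ordering and the core performance
 * limits before the commit is accepted.
 */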
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);
	struct plane_state *pstates;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };
	bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);

	pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	if (!crtc_state->enable || !crtc_state->active) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, crtc_state->enable,
				crtc_state->active);
		memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
		goto end;
	}

	mode = &crtc_state->adjusted_mode;
	DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (crtc_state->active_changed)
		crtc_state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	if (cstate->num_mixers) {
		mixer_width = mode->hdisplay / cstate->num_mixers;

		_dpu_crtc_setup_lm_bounds(crtc, crtc_state);
	}

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct dpu_plane_state *dpu_pstate = to_dpu_plane_state(pstate);
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		if (!pstate->visible)
			continue;

		pstates[cnt].dpu_pstate = dpu_pstate;
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		dpu_pstate->needs_dirtyfb = needs_dirtyfb;

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DRM_DEBUG_ATOMIC("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, crtc_state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/*
	 * validate source split:
	 * use pstates sorted by stage to check planes on same stage
	 * we assume that all pipes are in source split so it's valid to
	 * compare without taking into account left/right mixer placement
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}

int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
			out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
				dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);

	debugfs_create_file("status", 0400,
			dpu_crtc->debugfs_root,
			dpu_crtc, &_dpu_debugfs_status_fops);
	debugfs_create_file("state", 0600,
			dpu_crtc->debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	.get_vblank_counter = dpu_crtc_get_vblank_counter,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
	.get_scanout_position = dpu_crtc_get_scanout_position,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	int i;

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	if (dpu_kms->catalog->dspp_count)
		drm_crtc_enable_color_mgmt(crtc, 0, true, 0);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}
1428