/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "mdp5_kms.h"
#include "mdp5_ctl.h"

/*
 * CTL - MDP Control Pool Manager
 *
 * Controls are shared between all display interfaces.
 *
 * They are intended to be used for data path configuration.
 * The top level register programming describes the complete data path for
 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
 *
 * Hardware capabilities determine the number of concurrent data paths.
 *
 * In certain use cases (high-resolution dual pipe), one single CTL can be
 * shared across multiple CRTCs.
 */

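/*
 * CTL status flags:
 * CTL_STAT_BUSY:   the CTL is allocated to an interface (mdp5_ctlm_request())
 * CTL_STAT_BOOKED: the CTL is reserved for single-FLUSH pairing
 *                  (see mdp5_ctl_pair())
 */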
#define CTL_STAT_BUSY		0x1
#define CTL_STAT_BOOKED	0x2

struct op_mode {
	struct mdp5_interface intf;

	bool encoder_enabled;
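
	/* flush bits that must land before a START can be sent
	 * (see start_signal_needed()): */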
	uint32_t start_mask;
};

struct mdp5_ctl {
	struct mdp5_ctl_manager *ctlm;

	u32 id;
	int lm;

	/* CTL status bitmask */
	u32 status;

	/* Operation Mode Configuration for the Pipeline */
	struct op_mode pipeline;

	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
	spinlock_t hw_lock;
	u32 reg_offset;

	/* when do CTL registers need to be flushed? (mask of trigger bits) */
	u32 pending_ctl_trigger;

	bool cursor_on;

	/* True if the current CTL has FLUSH bits pending for single FLUSH. */
	bool flush_pending;

	struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
};

struct mdp5_ctl_manager {
	struct drm_device *dev;

	/* number of CTL / Layer Mixers in this hw config: */
	u32 nlm;
	u32 nctl;

	/* to filter out non-present bits in the current hardware config */
	u32 flush_hw_mask;

	/* status for single FLUSH */
	bool single_flush_supported;
	u32 single_flush_pending_mask;

	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
	spinlock_t pool_lock;
	struct mdp5_ctl ctls[MAX_CTL];
};

static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
	struct msm_drm_private *priv = ctl_mgr->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
	mdp5_write(mdp5_kms, reg, data);
}

static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
	return mdp5_read(mdp5_kms, reg);
}
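
/*
 * Program the global DISP_INTF_SEL register: route physical interface
 * @intf->num to the given interface type.
 */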
static void set_display_intf(struct mdp5_kms *mdp5_kms,
		struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 intf_sel;

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

	switch (intf->num) {
	case 0:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
		break;
	case 1:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
		break;
	case 2:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
		break;
	case 3:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
		break;
	default:
		BUG();
		break;
	}

	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}
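
/*
 * Program this CTL's OP register: select the interface number and, for DSI
 * command mode or writeback line mode, the matching operation mode.
 */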
static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 ctl_op = 0;

	if (!mdp5_cfg_intf_is_virtual(intf->type))
		ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);

	switch (intf->type) {
	case INTF_DSI:
		if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
			ctl_op |= MDP5_CTL_OP_CMD_MODE;
		break;

	case INTF_WB:
		if (intf->mode == MDP5_INTF_WB_MODE_LINE)
			ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
		break;

	default:
		break;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
		struct mdp5_interface *intf, int lm)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) {
		dev_err(mdp5_kms->dev->dev,
			"CTL %d is allocated by INTF %d, but used by INTF %d\n",
			ctl->id, ctl->pipeline.intf.num, intf->num);
		return -EINVAL;
	}

	ctl->lm = lm;

	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));

	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
				   mdp_ctl_flush_mask_encoder(intf);

	/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		set_display_intf(mdp5_kms, intf);

	set_ctl_op(ctl, intf);

	return 0;
}
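
/*
 * A START signal is only needed once the encoder is enabled and every bit in
 * start_mask has been flushed, and only for data paths that are kicked off
 * by software: writeback and DSI command mode.
 */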
static bool start_signal_needed(struct mdp5_ctl *ctl)
{
	struct op_mode *pipeline = &ctl->pipeline;

	if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
		return false;

	switch (pipeline->intf.type) {
	case INTF_WB:
		return true;
	case INTF_DSI:
		return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;
	default:
		return false;
	}
}

/*
 * send_start_signal() - Overlay Processor Start Signal
 *
 * For a given control operation (display pipeline), a START signal needs to
 * be sent to kick off operation and activate all layers.
 * e.g.: DSI command mode, Writeback
 */
static void send_start_signal(struct mdp5_ctl *ctl)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

static void refill_start_mask(struct mdp5_ctl *ctl)
{
	struct op_mode *pipeline = &ctl->pipeline;
	struct mdp5_interface *intf = &ctl->pipeline.intf;

	pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);

	/*
	 * Writeback encoder needs to program & flush
	 * address registers for each page flip.
	 */
	if (intf->type == INTF_WB)
		pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
}

/**
 * mdp5_ctl_set_encoder_state() - set the encoder state
 *
 * @ctl:     the CTL instance
 * @enabled: true, when encoder is ready for data streaming; false, otherwise.
 *
 * Note:
 * This encoder state is needed to trigger the START signal (data path
 * kickoff).
 */
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
{
	if (WARN_ON(!ctl))
		return -EINVAL;

	ctl->pipeline.encoder_enabled = enabled;
	DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off");

	if (start_signal_needed(ctl)) {
		send_start_signal(ctl);
		refill_start_mask(ctl);
	}

	return 0;
}

/*
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_cfg;
	int lm = ctl->lm;

	if (unlikely(WARN_ON(lm < 0))) {
		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d\n",
				ctl->id, lm);
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));

	if (enable)
		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
	ctl->cursor_on = enable;

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);

	return 0;
}
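
/*
 * Map a source pipe to its per-stage bitfield in the CTL LAYER register
 * (and, for stages >= STAGE6, to the extra bit in the LAYER_EXT register).
 */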
static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
		enum mdp_mixer_stage_id stage)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
	case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
	case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
	case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
	case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
	case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
	case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
	case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
	case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
	case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
	default:	return 0;
	}
}

static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
		enum mdp_mixer_stage_id stage)
{
	if (stage < STAGE6)
		return 0;

	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
	case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
	case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
	case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
	case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
	case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
	case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
	case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
	case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
	case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
	default:	return 0;
	}
}
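
/*
 * Program this CTL's mixer blend configuration. @stage is indexed by blend
 * stage id; @stage_cnt entries are consumed, starting at STAGE_BASE, or at
 * STAGE0 when a border color base layer is requested (BORDER_OUT flag).
 */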
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
	u32 ctl_blend_op_flags)
{
	unsigned long flags;
	u32 blend_cfg = 0, blend_ext_cfg = 0;
	int i, start_stage;

	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
		start_stage = STAGE0;
		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
	} else {
		start_stage = STAGE_BASE;
	}

	for (i = start_stage; i < start_stage + stage_cnt; i++) {
		blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
		blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	if (ctl->cursor_on)
		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);

	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
		blend_cfg, blend_ext_cfg);

	return 0;
}

u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
{
	if (intf->type == INTF_WB)
		return MDP5_CTL_FLUSH_WB;

	switch (intf->num) {
	case 0: return MDP5_CTL_FLUSH_TIMING_0;
	case 1: return MDP5_CTL_FLUSH_TIMING_1;
	case 2: return MDP5_CTL_FLUSH_TIMING_2;
	case 3: return MDP5_CTL_FLUSH_TIMING_3;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
	switch (cursor_id) {
	case 0: return MDP5_CTL_FLUSH_CURSOR_0;
	case 1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
	default:        return 0;
	}
}

u32 mdp_ctl_flush_mask_lm(int lm)
{
	switch (lm) {
	case 0:  return MDP5_CTL_FLUSH_LM0;
	case 1:  return MDP5_CTL_FLUSH_LM1;
	case 2:  return MDP5_CTL_FLUSH_LM2;
	case 5:  return MDP5_CTL_FLUSH_LM5;
	default: return 0;
	}
}

static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	u32 sw_mask = 0;
#define BIT_NEEDS_SW_FIX(bit) \
	(!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))

	/* for some targets, cursor bit is the same as LM bit */
	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
		sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);

	return sw_mask;
}
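
/*
 * With paired CTLs (single FLUSH), flush bits are accumulated in
 * single_flush_pending_mask until both CTLs of the pair have committed;
 * only then is the combined mask written, through the lower CTL id.
 */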
static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
		u32 *flush_id)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;

	if (ctl->pair) {
		DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
		ctl->flush_pending = true;
		ctl_mgr->single_flush_pending_mask |= (*flush_mask);
		*flush_mask = 0;

		if (ctl->pair->flush_pending) {
			*flush_id = min_t(u32, ctl->id, ctl->pair->id);
			*flush_mask = ctl_mgr->single_flush_pending_mask;

			ctl->flush_pending = false;
			ctl->pair->flush_pending = false;
			ctl_mgr->single_flush_pending_mask = 0;

			DBG("Single FLUSH mask %x, ID %d", *flush_mask,
				*flush_id);
		}
	}
}

/**
 * mdp5_ctl_commit() - Register Flush
 *
 * @ctl:        the CTL instance
 * @flush_mask: bitmask of hw FLUSH bits to set
 *
 * The flush register is used to indicate that several registers are all
 * programmed and safe to be copied to the back copy of the double-buffered
 * registers.
 *
 * Some registers' FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; if that is the
 * case, some trigger bits will be present in both the flush mask and
 * ctl->pending_ctl_trigger.
 *
 * Return: the H/W flush bit mask for this CTL, filtered to bits supported
 * by the current hardware.
 */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct op_mode *pipeline = &ctl->pipeline;
	unsigned long flags;
	u32 flush_id = ctl->id;
	u32 curr_ctl_flush_mask;

	pipeline->start_mask &= ~flush_mask;

	VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
			pipeline->start_mask, ctl->pending_ctl_trigger);

	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, flush_mask);

	flush_mask &= ctl_mgr->flush_hw_mask;

	curr_ctl_flush_mask = flush_mask;

	fix_for_single_flush(ctl, &flush_mask, &flush_id);

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl)) {
		send_start_signal(ctl);
		refill_start_mask(ctl);
	}

	return curr_ctl_flush_mask;
}
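
/*
 * A minimal usage sketch (hypothetical caller; a CRTC that just updated a
 * plane on pipe SSPP_VIG0 driving layer mixer 0 would do something like):
 *
 *	u32 flush_mask = mdp_ctl_flush_mask_pipe(SSPP_VIG0) |
 *			 mdp_ctl_flush_mask_lm(0);
 *	mdp5_ctl_commit(ctl, flush_mask);
 */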

u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
{
	return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
}

int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
}

/*
 * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
 */
int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	/* do nothing silently if hw doesn't support it */
	if (!ctl_mgr->single_flush_supported)
		return 0;

	if (!enable) {
		ctlx->pair = NULL;
		ctly->pair = NULL;
		mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
		return 0;
	} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
		dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
		return -EINVAL;
	} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
		dev_err(ctl_mgr->dev->dev, "Only booked CTLs can be paired\n");
		return -EINVAL;
	}

	ctlx->pair = ctly;
	ctly->pair = ctlx;

	mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
		   MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);

	return 0;
}

/*
 * mdp5_ctlm_request() - CTL allocation
 *
 * Try to return a booked CTL if @intf_num is 1 or 2, an unbooked one for
 * other INTFs. If no CTL is available in the preferred category, allocate
 * from the other one.
 *
 * Return NULL if no CTL is available.
 */
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
		int intf_num)
{
	struct mdp5_ctl *ctl = NULL;
	const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
	u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

	/* search the preferred category */
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	dev_warn(ctl_mgr->dev->dev,
		"fall back to the other CTL category for INTF %d!\n", intf_num);

	match ^= CTL_STAT_BOOKED;
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	dev_err(ctl_mgr->dev->dev, "No more CTL available!\n");
	goto unlock;

found:
	ctl = &ctl_mgr->ctls[c];
	ctl->pipeline.intf.num = intf_num;
	ctl->lm = -1;
	ctl->status |= CTL_STAT_BUSY;
	ctl->pending_ctl_trigger = 0;
	DBG("CTL %d allocated", ctl->id);

unlock:
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	return ctl;
}
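
/* Reset every CTL in the pool to a clean operation mode (CTL_OP = 0). */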
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
	unsigned long flags;
	int c;

	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}
}

void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
	kfree(ctl_mgr);
}

struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
{
	struct mdp5_ctl_manager *ctl_mgr;
	const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
	int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
	unsigned long flags;
	int c, ret;

	ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
	if (!ctl_mgr) {
		dev_err(dev->dev, "failed to allocate CTL manager\n");
		ret = -ENOMEM;
		goto fail;
	}

	if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
		dev_err(dev->dev, "Increase static pool size to at least %d\n",
				ctl_cfg->count);
		ret = -ENOSPC;
		goto fail;
	}

	/* initialize the CTL manager: */
	ctl_mgr->dev = dev;
	ctl_mgr->nlm = hw_cfg->lm.count;
	ctl_mgr->nctl = ctl_cfg->count;
	ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
	spin_lock_init(&ctl_mgr->pool_lock);

	/* initialize each CTL of the pool: */
	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		if (WARN_ON(!ctl_cfg->base[c])) {
			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
			ret = -EINVAL;
			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
			goto fail;
		}
		ctl->ctlm = ctl_mgr;
		ctl->id = c;
		ctl->reg_offset = ctl_cfg->base[c];
		ctl->status = 0;
		spin_lock_init(&ctl->hw_lock);
	}

	/*
	 * In the dual DSI case, CTL0 and CTL1 are always assigned to the two
	 * DSI interfaces to support the single FLUSH feature (flush both CTL0
	 * and CTL1 with a single write to CTL0's FLUSH register), which keeps
	 * the two DSI pipes in sync.
	 * Single FLUSH is supported from hw rev v3.0.
	 */
	if (rev >= 3) {
		ctl_mgr->single_flush_supported = true;
		/* Reserve CTL0/1 for INTF1/2 */
		ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
		ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
	}
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);

	return ctl_mgr;

fail:
	if (ctl_mgr)
		mdp5_ctlm_destroy(ctl_mgr);

	return ERR_PTR(ret);
}
718