1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
4  * Author: James.Qian.Wang <james.qian.wang@arm.com>
5  *
6  */
7 
8 #include <drm/drm_print.h>
9 #include <linux/clk.h>
10 #include "komeda_dev.h"
11 #include "komeda_kms.h"
12 #include "komeda_pipeline.h"
13 #include "komeda_framebuffer.h"
14 
15 static inline bool is_switching_user(void *old, void *new)
16 {
17 	if (!old || !new)
18 		return false;
19 
20 	return old != new;
21 }
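/* For illustration (not part of the original source): a user "switch" is only
 * reported when both users are valid and differ, e.g. with hypothetical direct
 * users plane_a/plane_b:
 *   is_switching_user(plane_a, NULL)    == false  - no current user, free to bind
 *   is_switching_user(plane_a, plane_a) == false  - same user, not a switch
 *   is_switching_user(plane_a, plane_b) == true   - busy, callers return -EBUSY
 */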
22 
23 static struct komeda_pipeline_state *
24 komeda_pipeline_get_state(struct komeda_pipeline *pipe,
25 			  struct drm_atomic_state *state)
26 {
27 	struct drm_private_state *priv_st;
28 
29 	priv_st = drm_atomic_get_private_obj_state(state, &pipe->obj);
30 	if (IS_ERR(priv_st))
31 		return ERR_CAST(priv_st);
32 
33 	return priv_to_pipe_st(priv_st);
34 }
35 
36 struct komeda_pipeline_state *
37 komeda_pipeline_get_old_state(struct komeda_pipeline *pipe,
38 			      struct drm_atomic_state *state)
39 {
40 	struct drm_private_state *priv_st;
41 
42 	priv_st = drm_atomic_get_old_private_obj_state(state, &pipe->obj);
43 	if (priv_st)
44 		return priv_to_pipe_st(priv_st);
45 	return NULL;
46 }
47 
48 static struct komeda_pipeline_state *
49 komeda_pipeline_get_new_state(struct komeda_pipeline *pipe,
50 			      struct drm_atomic_state *state)
51 {
52 	struct drm_private_state *priv_st;
53 
54 	priv_st = drm_atomic_get_new_private_obj_state(state, &pipe->obj);
55 	if (priv_st)
56 		return priv_to_pipe_st(priv_st);
57 	return NULL;
58 }
59 
60 /* Assign pipeline for crtc */
61 static struct komeda_pipeline_state *
62 komeda_pipeline_get_state_and_set_crtc(struct komeda_pipeline *pipe,
63 				       struct drm_atomic_state *state,
64 				       struct drm_crtc *crtc)
65 {
66 	struct komeda_pipeline_state *st;
67 
68 	st = komeda_pipeline_get_state(pipe, state);
69 	if (IS_ERR(st))
70 		return st;
71 
72 	if (is_switching_user(crtc, st->crtc)) {
73 		DRM_DEBUG_ATOMIC("CRTC%d required pipeline%d is busy.\n",
74 				 drm_crtc_index(crtc), pipe->id);
75 		return ERR_PTR(-EBUSY);
76 	}
77 
78 	/* a pipeline can only be disabled when it is free or unused */
79 	if (!crtc && st->active_comps) {
80 		DRM_DEBUG_ATOMIC("Disabling a busy pipeline:%d.\n", pipe->id);
81 		return ERR_PTR(-EBUSY);
82 	}
83 
84 	st->crtc = crtc;
85 
86 	if (crtc) {
87 		struct komeda_crtc_state *kcrtc_st;
88 
89 		kcrtc_st = to_kcrtc_st(drm_atomic_get_new_crtc_state(state,
90 								     crtc));
91 
92 		kcrtc_st->active_pipes |= BIT(pipe->id);
93 		kcrtc_st->affected_pipes |= BIT(pipe->id);
94 	}
95 	return st;
96 }
97 
98 static struct komeda_component_state *
99 komeda_component_get_state(struct komeda_component *c,
100 			   struct drm_atomic_state *state)
101 {
102 	struct drm_private_state *priv_st;
103 
104 	WARN_ON(!drm_modeset_is_locked(&c->pipeline->obj.lock));
105 
106 	priv_st = drm_atomic_get_private_obj_state(state, &c->obj);
107 	if (IS_ERR(priv_st))
108 		return ERR_CAST(priv_st);
109 
110 	return priv_to_comp_st(priv_st);
111 }
112 
113 static struct komeda_component_state *
114 komeda_component_get_old_state(struct komeda_component *c,
115 			       struct drm_atomic_state *state)
116 {
117 	struct drm_private_state *priv_st;
118 
119 	priv_st = drm_atomic_get_old_private_obj_state(state, &c->obj);
120 	if (priv_st)
121 		return priv_to_comp_st(priv_st);
122 	return NULL;
123 }
124 
125 /**
126  * komeda_component_get_state_and_set_user()
127  *
128  * @c: component to get state and set user
129  * @state: global atomic state
130  * @user: direct user, the binding user
131  * @crtc: the CRTC user, the big boss :)
132  *
133  * This function accepts two users:
134  * -   The direct user: plane/crtc/wb_connector, depending on the component
135  * -   The big boss (CRTC)
136  * The CRTC is the big boss (the final user): all component resources are
137  * eventually assigned to a CRTC, e.g. a layer is bound to a kms_plane, and
138  * the kms_plane is in turn bound to a CRTC.
139  *
140  * The big boss (CRTC) is needed for pipeline assignment: a &komeda_component
141  * is not independent and cannot be assigned to a CRTC freely, it belongs to
142  * a specific pipeline. Only a pipeline can be shared between CRTCs, and a
143  * pipeline as a whole (including all its internal components) is assigned
144  * to a specific CRTC.
145  *
146  * So when setting a user for a komeda_component, first check whether its
147  * pipeline is available to this CRTC: if the pipeline is busy (assigned to
148  * another CRTC), the component cannot be assigned even if it is free itself.
149  */
150 static struct komeda_component_state *
151 komeda_component_get_state_and_set_user(struct komeda_component *c,
152 					struct drm_atomic_state *state,
153 					void *user,
154 					struct drm_crtc *crtc)
155 {
156 	struct komeda_pipeline_state *pipe_st;
157 	struct komeda_component_state *st;
158 
159 	/* First check if the pipeline is available */
160 	pipe_st = komeda_pipeline_get_state_and_set_crtc(c->pipeline,
161 							 state, crtc);
162 	if (IS_ERR(pipe_st))
163 		return ERR_CAST(pipe_st);
164 
165 	st = komeda_component_get_state(c, state);
166 	if (IS_ERR(st))
167 		return st;
168 
169 	/* check if the component has been occupied */
170 	if (is_switching_user(user, st->binding_user)) {
171 		DRM_DEBUG_ATOMIC("required %s is busy.\n", c->name);
172 		return ERR_PTR(-EBUSY);
173 	}
174 
175 	st->binding_user = user;
176 	/* mark the component as active if user is valid */
177 	if (st->binding_user)
178 		pipe_st->active_comps |= BIT(c->id);
179 
180 	return st;
181 }
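/* Usage sketch with hypothetical objects: when a layer is requested for
 * [PLANE:32] but its state already records [PLANE:33] as the binding_user,
 * is_switching_user() reports a switch and this function returns
 * ERR_PTR(-EBUSY); the same applies one level up when the whole pipeline is
 * already assigned to another CRTC.
 */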
182 
183 static void
184 komeda_component_add_input(struct komeda_component_state *state,
185 			   struct komeda_component_output *input,
186 			   int idx)
187 {
188 	struct komeda_component *c = state->component;
189 
190 	WARN_ON((idx < 0 || idx >= c->max_active_inputs));
191 
192 	/* inputs[i] is only valid when it is active. So if input[i] is a newly
193 	 * enabled input which switches from disabled to enabled, the old
194 	 * inputs[i] is undefined (NOT zeroed); we cannot rely on memcmp, but
195 	 * must directly mark it as changed.
196 	 */
197 	if (!has_bit(idx, state->affected_inputs) ||
198 	    memcmp(&state->inputs[idx], input, sizeof(*input))) {
199 		memcpy(&state->inputs[idx], input, sizeof(*input));
200 		state->changed_active_inputs |= BIT(idx);
201 	}
202 	state->active_inputs |= BIT(idx);
203 	state->affected_inputs |= BIT(idx);
204 }
205 
206 static int
207 komeda_component_check_input(struct komeda_component_state *state,
208 			     struct komeda_component_output *input,
209 			     int idx)
210 {
211 	struct komeda_component *c = state->component;
212 
213 	if ((idx < 0) || (idx >= c->max_active_inputs)) {
214 		DRM_DEBUG_ATOMIC("%s required an invalid %s-input[%d].\n",
215 				 input->component->name, c->name, idx);
216 		return -EINVAL;
217 	}
218 
219 	if (has_bit(idx, state->active_inputs)) {
220 		DRM_DEBUG_ATOMIC("%s required %s-input[%d] has been occupied already.\n",
221 				 input->component->name, c->name, idx);
222 		return -EINVAL;
223 	}
224 
225 	return 0;
226 }
227 
228 static void
229 komeda_component_set_output(struct komeda_component_output *output,
230 			    struct komeda_component *comp,
231 			    u8 output_port)
232 {
233 	output->component = comp;
234 	output->output_port = output_port;
235 }
236 
237 static int
238 komeda_component_validate_private(struct komeda_component *c,
239 				  struct komeda_component_state *st)
240 {
241 	int err;
242 
243 	if (!c->funcs->validate)
244 		return 0;
245 
246 	err = c->funcs->validate(c, st);
247 	if (err)
248 		DRM_DEBUG_ATOMIC("%s validate private failed.\n", c->name);
249 
250 	return err;
251 }
252 
253 /* Get a currently available scaler from the component->supported_outputs */
254 static struct komeda_scaler *
255 komeda_component_get_avail_scaler(struct komeda_component *c,
256 				  struct drm_atomic_state *state)
257 {
258 	struct komeda_pipeline_state *pipe_st;
259 	u32 avail_scalers;
260 
261 	pipe_st = komeda_pipeline_get_state(c->pipeline, state);
262 	if (!pipe_st)
263 		return NULL;
264 
265 	avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^
266 			KOMEDA_PIPELINE_SCALERS;
267 
268 	c = komeda_component_pickup_output(c, avail_scalers);
269 
270 	return to_scaler(c);
271 }
272 
273 static void
274 komeda_rotate_data_flow(struct komeda_data_flow_cfg *dflow, u32 rot)
275 {
276 	if (drm_rotation_90_or_270(rot)) {
277 		swap(dflow->in_h, dflow->in_w);
278 		swap(dflow->total_in_h, dflow->total_in_w);
279 	}
280 }
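/* For illustration (hypothetical sizes): a 90/270-degree rotation swaps the
 * source width/height, so an in_w/in_h of 1920x1080 is seen as 1080x1920 by
 * the stages downstream of the component that performed the rotation.
 */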
281 
282 static int
283 komeda_layer_check_cfg(struct komeda_layer *layer,
284 		       struct komeda_fb *kfb,
285 		       struct komeda_data_flow_cfg *dflow)
286 {
287 	u32 src_x, src_y, src_w, src_h;
288 	u32 line_sz, max_line_sz;
289 
290 	if (!komeda_fb_is_layer_supported(kfb, layer->layer_type, dflow->rot))
291 		return -EINVAL;
292 
293 	if (layer->base.id == KOMEDA_COMPONENT_WB_LAYER) {
294 		src_x = dflow->out_x;
295 		src_y = dflow->out_y;
296 		src_w = dflow->out_w;
297 		src_h = dflow->out_h;
298 	} else {
299 		src_x = dflow->in_x;
300 		src_y = dflow->in_y;
301 		src_w = dflow->in_w;
302 		src_h = dflow->in_h;
303 	}
304 
305 	if (komeda_fb_check_src_coords(kfb, src_x, src_y, src_w, src_h))
306 		return -EINVAL;
307 
308 	if (!in_range(&layer->hsize_in, src_w)) {
309 		DRM_DEBUG_ATOMIC("invalidate src_w %d.\n", src_w);
310 		return -EINVAL;
311 	}
312 
313 	if (!in_range(&layer->vsize_in, src_h)) {
314 		DRM_DEBUG_ATOMIC("invalidate src_h %d.\n", src_h);
315 		return -EINVAL;
316 	}
317 
318 	if (drm_rotation_90_or_270(dflow->rot))
319 		line_sz = dflow->in_h;
320 	else
321 		line_sz = dflow->in_w;
322 
323 	if (kfb->base.format->hsub > 1)
324 		max_line_sz = layer->yuv_line_sz;
325 	else
326 		max_line_sz = layer->line_sz;
327 
328 	if (line_sz > max_line_sz) {
329 		DRM_DEBUG_ATOMIC("Required line_sz: %d exceeds the max size %d\n",
330 				 line_sz, max_line_sz);
331 		return -EINVAL;
332 	}
333 
334 	return 0;
335 }
336 
337 static int
338 komeda_layer_validate(struct komeda_layer *layer,
339 		      struct komeda_plane_state *kplane_st,
340 		      struct komeda_data_flow_cfg *dflow)
341 {
342 	struct drm_plane_state *plane_st = &kplane_st->base;
343 	struct drm_framebuffer *fb = plane_st->fb;
344 	struct komeda_fb *kfb = to_kfb(fb);
345 	struct komeda_component_state *c_st;
346 	struct komeda_layer_state *st;
347 	int i, err;
348 
349 	err = komeda_layer_check_cfg(layer, kfb, dflow);
350 	if (err)
351 		return err;
352 
353 	c_st = komeda_component_get_state_and_set_user(&layer->base,
354 			plane_st->state, plane_st->plane, plane_st->crtc);
355 	if (IS_ERR(c_st))
356 		return PTR_ERR(c_st);
357 
358 	st = to_layer_st(c_st);
359 
360 	st->rot = dflow->rot;
361 
362 	if (fb->modifier) {
363 		st->hsize = kfb->aligned_w;
364 		st->vsize = kfb->aligned_h;
365 		st->afbc_crop_l = dflow->in_x;
366 		st->afbc_crop_r = kfb->aligned_w - dflow->in_x - dflow->in_w;
367 		st->afbc_crop_t = dflow->in_y;
368 		st->afbc_crop_b = kfb->aligned_h - dflow->in_y - dflow->in_h;
369 	} else {
370 		st->hsize = dflow->in_w;
371 		st->vsize = dflow->in_h;
372 		st->afbc_crop_l = 0;
373 		st->afbc_crop_r = 0;
374 		st->afbc_crop_t = 0;
375 		st->afbc_crop_b = 0;
376 	}
377 
378 	for (i = 0; i < fb->format->num_planes; i++)
379 		st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->in_x,
380 						       dflow->in_y, i);
381 
382 	err = komeda_component_validate_private(&layer->base, c_st);
383 	if (err)
384 		return err;
385 
386 	/* update the data flow for the next stage */
387 	komeda_component_set_output(&dflow->input, &layer->base, 0);
388 
389 	/*
390 	 * The rotation has been handled by the layer, so adjust the data flow
391 	 * for the next stage.
392 	 */
393 	komeda_rotate_data_flow(dflow, st->rot);
394 
395 	return 0;
396 }
397 
398 static int
399 komeda_wb_layer_validate(struct komeda_layer *wb_layer,
400 			 struct drm_connector_state *conn_st,
401 			 struct komeda_data_flow_cfg *dflow)
402 {
403 	struct komeda_fb *kfb = to_kfb(conn_st->writeback_job->fb);
404 	struct komeda_component_state *c_st;
405 	struct komeda_layer_state *st;
406 	int i, err;
407 
408 	err = komeda_layer_check_cfg(wb_layer, kfb, dflow);
409 	if (err)
410 		return err;
411 
412 	c_st = komeda_component_get_state_and_set_user(&wb_layer->base,
413 			conn_st->state, conn_st->connector, conn_st->crtc);
414 	if (IS_ERR(c_st))
415 		return PTR_ERR(c_st);
416 
417 	st = to_layer_st(c_st);
418 
419 	st->hsize = dflow->out_w;
420 	st->vsize = dflow->out_h;
421 
422 	for (i = 0; i < kfb->base.format->num_planes; i++)
423 		st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->out_x,
424 						       dflow->out_y, i);
425 
426 	komeda_component_add_input(&st->base, &dflow->input, 0);
427 	komeda_component_set_output(&dflow->input, &wb_layer->base, 0);
428 
429 	return 0;
430 }
431 
432 static bool scaling_ratio_valid(u32 size_in, u32 size_out,
433 				u32 max_upscaling, u32 max_downscaling)
434 {
435 	if (size_out > size_in * max_upscaling)
436 		return false;
437 	else if (size_in > size_out * max_downscaling)
438 		return false;
439 	return true;
440 }
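/* For illustration (hypothetical limits): with max_upscaling = 4 and
 * max_downscaling = 4, scaling_ratio_valid(1000, 3000, 4, 4) passes (3x up),
 * while scaling_ratio_valid(1000, 4001, 4, 4) fails since 4001 > 1000 * 4.
 */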
441 
442 static int
443 komeda_scaler_check_cfg(struct komeda_scaler *scaler,
444 			struct komeda_crtc_state *kcrtc_st,
445 			struct komeda_data_flow_cfg *dflow)
446 {
447 	u32 hsize_in, vsize_in, hsize_out, vsize_out;
448 	u32 max_upscaling;
449 
450 	hsize_in = dflow->in_w;
451 	vsize_in = dflow->in_h;
452 	hsize_out = dflow->out_w;
453 	vsize_out = dflow->out_h;
454 
455 	if (!in_range(&scaler->hsize, hsize_in) ||
456 	    !in_range(&scaler->hsize, hsize_out)) {
457 		DRM_DEBUG_ATOMIC("Invalid horizontal sizes");
458 		return -EINVAL;
459 	}
460 
461 	if (!in_range(&scaler->vsize, vsize_in) ||
462 	    !in_range(&scaler->vsize, vsize_out)) {
463 		DRM_DEBUG_ATOMIC("Invalid vertical sizes");
464 		return -EINVAL;
465 	}
466 
467 	/* If the input comes from compiz, the scaling is for writeback, and the
468 	 * scaler cannot do upscaling for writeback
469 	 */
470 	if (has_bit(dflow->input.component->id, KOMEDA_PIPELINE_COMPIZS))
471 		max_upscaling = 1;
472 	else
473 		max_upscaling = scaler->max_upscaling;
474 
475 	if (!scaling_ratio_valid(hsize_in, hsize_out, max_upscaling,
476 				 scaler->max_downscaling)) {
477 		DRM_DEBUG_ATOMIC("Invalid horizontal scaling ratio");
478 		return -EINVAL;
479 	}
480 
481 	if (!scaling_ratio_valid(vsize_in, vsize_out, max_upscaling,
482 				 scaler->max_downscaling)) {
483 		DRM_DEBUG_ATOMIC("Invalid vertical scaling ratio");
484 		return -EINVAL;
485 	}
486 
487 	if (hsize_in > hsize_out || vsize_in > vsize_out) {
488 		struct komeda_pipeline *pipe = scaler->base.pipeline;
489 		int err;
490 
491 		err = pipe->funcs->downscaling_clk_check(pipe,
492 					&kcrtc_st->base.adjusted_mode,
493 					komeda_crtc_get_aclk(kcrtc_st), dflow);
494 		if (err) {
495 			DRM_DEBUG_ATOMIC("aclk can't satisfy the clock requirement of the downscaling\n");
496 			return err;
497 		}
498 	}
499 
500 	return 0;
501 }
502 
503 static int
504 komeda_scaler_validate(void *user,
505 		       struct komeda_crtc_state *kcrtc_st,
506 		       struct komeda_data_flow_cfg *dflow)
507 {
508 	struct drm_atomic_state *drm_st = kcrtc_st->base.state;
509 	struct komeda_component_state *c_st;
510 	struct komeda_scaler_state *st;
511 	struct komeda_scaler *scaler;
512 	int err = 0;
513 
514 	if (!(dflow->en_scaling || dflow->en_img_enhancement))
515 		return 0;
516 
517 	scaler = komeda_component_get_avail_scaler(dflow->input.component,
518 						   drm_st);
519 	if (!scaler) {
520 		DRM_DEBUG_ATOMIC("No scaler available");
521 		return -EINVAL;
522 	}
523 
524 	err = komeda_scaler_check_cfg(scaler, kcrtc_st, dflow);
525 	if (err)
526 		return err;
527 
528 	c_st = komeda_component_get_state_and_set_user(&scaler->base,
529 			drm_st, user, kcrtc_st->base.crtc);
530 	if (IS_ERR(c_st))
531 		return PTR_ERR(c_st);
532 
533 	st = to_scaler_st(c_st);
534 
535 	st->hsize_in = dflow->in_w;
536 	st->vsize_in = dflow->in_h;
537 	st->hsize_out = dflow->out_w;
538 	st->vsize_out = dflow->out_h;
539 	st->right_crop = dflow->right_crop;
540 	st->left_crop = dflow->left_crop;
541 	st->total_vsize_in = dflow->total_in_h;
542 	st->total_hsize_in = dflow->total_in_w;
543 	st->total_hsize_out = dflow->total_out_w;
544 
545 	/* Enable alpha processing if the next stage needs the pixel alpha */
546 	st->en_alpha = dflow->pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE;
547 	st->en_scaling = dflow->en_scaling;
548 	st->en_img_enhancement = dflow->en_img_enhancement;
549 	st->en_split = dflow->en_split;
550 	st->right_part = dflow->right_part;
551 
552 	komeda_component_add_input(&st->base, &dflow->input, 0);
553 	komeda_component_set_output(&dflow->input, &scaler->base, 0);
554 	return err;
555 }
556 
557 static void komeda_split_data_flow(struct komeda_scaler *scaler,
558 				   struct komeda_data_flow_cfg *dflow,
559 				   struct komeda_data_flow_cfg *l_dflow,
560 				   struct komeda_data_flow_cfg *r_dflow);
561 
562 static int
563 komeda_splitter_validate(struct komeda_splitter *splitter,
564 			 struct drm_connector_state *conn_st,
565 			 struct komeda_data_flow_cfg *dflow,
566 			 struct komeda_data_flow_cfg *l_output,
567 			 struct komeda_data_flow_cfg *r_output)
568 {
569 	struct komeda_component_state *c_st;
570 	struct komeda_splitter_state *st;
571 
572 	if (!splitter) {
573 		DRM_DEBUG_ATOMIC("Current HW doesn't support splitter.\n");
574 		return -EINVAL;
575 	}
576 
577 	if (!in_range(&splitter->hsize, dflow->in_w)) {
578 		DRM_DEBUG_ATOMIC("split in_w:%d is out of the acceptable range.\n",
579 				 dflow->in_w);
580 		return -EINVAL;
581 	}
582 
583 	if (!in_range(&splitter->vsize, dflow->in_h)) {
584 		DRM_DEBUG_ATOMIC("split in_h: %d exceeds the acceptable range.\n",
585 				 dflow->in_h);
586 		return -EINVAL;
587 	}
588 
589 	c_st = komeda_component_get_state_and_set_user(&splitter->base,
590 			conn_st->state, conn_st->connector, conn_st->crtc);
591 
592 	if (IS_ERR(c_st))
593 		return PTR_ERR(c_st);
594 
595 	komeda_split_data_flow(splitter->base.pipeline->scalers[0],
596 			       dflow, l_output, r_output);
597 
598 	st = to_splitter_st(c_st);
599 	st->hsize = dflow->in_w;
600 	st->vsize = dflow->in_h;
601 	st->overlap = dflow->overlap;
602 
603 	komeda_component_add_input(&st->base, &dflow->input, 0);
604 	komeda_component_set_output(&l_output->input, &splitter->base, 0);
605 	komeda_component_set_output(&r_output->input, &splitter->base, 1);
606 
607 	return 0;
608 }
609 
610 static int
611 komeda_merger_validate(struct komeda_merger *merger,
612 		       void *user,
613 		       struct komeda_crtc_state *kcrtc_st,
614 		       struct komeda_data_flow_cfg *left_input,
615 		       struct komeda_data_flow_cfg *right_input,
616 		       struct komeda_data_flow_cfg *output)
617 {
618 	struct komeda_component_state *c_st;
619 	struct komeda_merger_state *st;
620 	int err = 0;
621 
622 	if (!merger) {
623 		DRM_DEBUG_ATOMIC("No merger is available");
624 		return -EINVAL;
625 	}
626 
627 	if (!in_range(&merger->hsize_merged, output->out_w)) {
628 		DRM_DEBUG_ATOMIC("merged_w: %d is out of the accepted range.\n",
629 				 output->out_w);
630 		return -EINVAL;
631 	}
632 
633 	if (!in_range(&merger->vsize_merged, output->out_h)) {
634 		DRM_DEBUG_ATOMIC("merged_h: %d is out of the accepted range.\n",
635 				 output->out_h);
636 		return -EINVAL;
637 	}
638 
639 	c_st = komeda_component_get_state_and_set_user(&merger->base,
640 			kcrtc_st->base.state, kcrtc_st->base.crtc, kcrtc_st->base.crtc);
641 
642 	if (IS_ERR(c_st))
643 		return PTR_ERR(c_st);
644 
645 	st = to_merger_st(c_st);
646 	st->hsize_merged = output->out_w;
647 	st->vsize_merged = output->out_h;
648 
649 	komeda_component_add_input(c_st, &left_input->input, 0);
650 	komeda_component_add_input(c_st, &right_input->input, 1);
651 	komeda_component_set_output(&output->input, &merger->base, 0);
652 
653 	return err;
654 }
655 
656 void pipeline_composition_size(struct komeda_crtc_state *kcrtc_st,
657 			       u16 *hsize, u16 *vsize)
658 {
659 	struct drm_display_mode *m = &kcrtc_st->base.adjusted_mode;
660 
661 	if (hsize)
662 		*hsize = m->hdisplay;
663 	if (vsize)
664 		*vsize = m->vdisplay;
665 }
666 
667 static int
668 komeda_compiz_set_input(struct komeda_compiz *compiz,
669 			struct komeda_crtc_state *kcrtc_st,
670 			struct komeda_data_flow_cfg *dflow)
671 {
672 	struct drm_atomic_state *drm_st = kcrtc_st->base.state;
673 	struct komeda_component_state *c_st, *old_st;
674 	struct komeda_compiz_input_cfg *cin;
675 	u16 compiz_w, compiz_h;
676 	int idx = dflow->blending_zorder;
677 
678 	pipeline_composition_size(kcrtc_st, &compiz_w, &compiz_h);
679 	/* check display rect */
680 	if ((dflow->out_x + dflow->out_w > compiz_w) ||
681 	    (dflow->out_y + dflow->out_h > compiz_h) ||
682 	     dflow->out_w == 0 || dflow->out_h == 0) {
683 		DRM_DEBUG_ATOMIC("invalid disp rect [x=%d, y=%d, w=%d, h=%d]\n",
684 				 dflow->out_x, dflow->out_y,
685 				 dflow->out_w, dflow->out_h);
686 		return -EINVAL;
687 	}
688 
689 	c_st = komeda_component_get_state_and_set_user(&compiz->base, drm_st,
690 			kcrtc_st->base.crtc, kcrtc_st->base.crtc);
691 	if (IS_ERR(c_st))
692 		return PTR_ERR(c_st);
693 
694 	if (komeda_component_check_input(c_st, &dflow->input, idx))
695 		return -EINVAL;
696 
697 	cin = &(to_compiz_st(c_st)->cins[idx]);
698 
699 	cin->hsize   = dflow->out_w;
700 	cin->vsize   = dflow->out_h;
701 	cin->hoffset = dflow->out_x;
702 	cin->voffset = dflow->out_y;
703 	cin->pixel_blend_mode = dflow->pixel_blend_mode;
704 	cin->layer_alpha = dflow->layer_alpha;
705 
706 	old_st = komeda_component_get_old_state(&compiz->base, drm_st);
707 	WARN_ON(!old_st);
708 
709 	/* compare with old to check if this input has been changed */
710 	if (memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
711 		c_st->changed_active_inputs |= BIT(idx);
712 
713 	komeda_component_add_input(c_st, &dflow->input, idx);
714 	komeda_component_set_output(&dflow->input, &compiz->base, 0);
715 
716 	return 0;
717 }
718 
719 static int
720 komeda_compiz_validate(struct komeda_compiz *compiz,
721 		       struct komeda_crtc_state *state,
722 		       struct komeda_data_flow_cfg *dflow)
723 {
724 	struct komeda_component_state *c_st;
725 	struct komeda_compiz_state *st;
726 
727 	c_st = komeda_component_get_state_and_set_user(&compiz->base,
728 			state->base.state, state->base.crtc, state->base.crtc);
729 	if (IS_ERR(c_st))
730 		return PTR_ERR(c_st);
731 
732 	st = to_compiz_st(c_st);
733 
734 	pipeline_composition_size(state, &st->hsize, &st->vsize);
735 
736 	komeda_component_set_output(&dflow->input, &compiz->base, 0);
737 
738 	/* the compiz output dflow will be fed into the next pipeline stage, so
739 	 * prepare the data flow configuration for that stage
740 	 */
741 	if (dflow) {
742 		dflow->in_w = st->hsize;
743 		dflow->in_h = st->vsize;
744 		dflow->out_w = dflow->in_w;
745 		dflow->out_h = dflow->in_h;
746 		/* the compiz output data has no alpha; it can only be used as the
747 		 * bottom layer when blending it with the master layers
748 		 */
749 		dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
750 		dflow->layer_alpha = 0xFF;
751 		dflow->blending_zorder = 0;
752 	}
753 
754 	return 0;
755 }
756 
757 static int
758 komeda_improc_validate(struct komeda_improc *improc,
759 		       struct komeda_crtc_state *kcrtc_st,
760 		       struct komeda_data_flow_cfg *dflow)
761 {
762 	struct drm_crtc *crtc = kcrtc_st->base.crtc;
763 	struct drm_crtc_state *crtc_st = &kcrtc_st->base;
764 	struct komeda_component_state *c_st;
765 	struct komeda_improc_state *st;
766 
767 	c_st = komeda_component_get_state_and_set_user(&improc->base,
768 			kcrtc_st->base.state, crtc, crtc);
769 	if (IS_ERR(c_st))
770 		return PTR_ERR(c_st);
771 
772 	st = to_improc_st(c_st);
773 
774 	st->hsize = dflow->in_w;
775 	st->vsize = dflow->in_h;
776 
777 	if (drm_atomic_crtc_needs_modeset(crtc_st)) {
778 		u32 output_depths, output_formats;
779 		u32 avail_depths, avail_formats;
780 
781 		komeda_crtc_get_color_config(crtc_st, &output_depths,
782 					     &output_formats);
783 
784 		avail_depths = output_depths & improc->supported_color_depths;
785 		if (avail_depths == 0) {
786 			DRM_DEBUG_ATOMIC("No available color depths, conn depths: 0x%x & display: 0x%x\n",
787 					 output_depths,
788 					 improc->supported_color_depths);
789 			return -EINVAL;
790 		}
791 
792 		avail_formats = output_formats &
793 				improc->supported_color_formats;
794 		if (!avail_formats) {
795 			DRM_DEBUG_ATOMIC("No available color_formats, conn formats 0x%x & display: 0x%x\n",
796 					 output_formats,
797 					 improc->supported_color_formats);
798 			return -EINVAL;
799 		}
800 
801 		st->color_depth = __fls(avail_depths);
802 		st->color_format = BIT(__ffs(avail_formats));
803 	}
804 
805 	if (kcrtc_st->base.color_mgmt_changed) {
806 		drm_lut_to_fgamma_coeffs(kcrtc_st->base.gamma_lut,
807 					 st->fgamma_coeffs);
808 		drm_ctm_to_coeffs(kcrtc_st->base.ctm, st->ctm_coeffs);
809 	}
810 
811 	komeda_component_add_input(&st->base, &dflow->input, 0);
812 	komeda_component_set_output(&dflow->input, &improc->base, 0);
813 
814 	return 0;
815 }
816 
817 static int
818 komeda_timing_ctrlr_validate(struct komeda_timing_ctrlr *ctrlr,
819 			     struct komeda_crtc_state *kcrtc_st,
820 			     struct komeda_data_flow_cfg *dflow)
821 {
822 	struct drm_crtc *crtc = kcrtc_st->base.crtc;
823 	struct komeda_timing_ctrlr_state *st;
824 	struct komeda_component_state *c_st;
825 
826 	c_st = komeda_component_get_state_and_set_user(&ctrlr->base,
827 			kcrtc_st->base.state, crtc, crtc);
828 	if (IS_ERR(c_st))
829 		return PTR_ERR(c_st);
830 
831 	st = to_ctrlr_st(c_st);
832 
833 	komeda_component_add_input(&st->base, &dflow->input, 0);
834 	komeda_component_set_output(&dflow->input, &ctrlr->base, 0);
835 
836 	return 0;
837 }
838 
839 void komeda_complete_data_flow_cfg(struct komeda_layer *layer,
840 				   struct komeda_data_flow_cfg *dflow,
841 				   struct drm_framebuffer *fb)
842 {
843 	struct komeda_scaler *scaler = layer->base.pipeline->scalers[0];
844 	u32 w = dflow->in_w;
845 	u32 h = dflow->in_h;
846 
847 	dflow->total_in_w = dflow->in_w;
848 	dflow->total_in_h = dflow->in_h;
849 	dflow->total_out_w = dflow->out_w;
850 
851 	/* if format doesn't have alpha, fix blend mode to PIXEL_NONE */
852 	if (!fb->format->has_alpha)
853 		dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
854 
855 	if (drm_rotation_90_or_270(dflow->rot))
856 		swap(w, h);
857 
858 	dflow->en_scaling = (w != dflow->out_w) || (h != dflow->out_h);
859 	dflow->is_yuv = fb->format->is_yuv;
860 
861 	/* try to enable image enhancer if data flow is a 2x+ upscaling */
862 	dflow->en_img_enhancement = dflow->out_w >= 2 * w ||
863 				    dflow->out_h >= 2 * h;
864 
865 	/* try to enable split if the scaling exceeds the scaler's acceptable
866 	 * input/output range.
867 	 */
868 	if (dflow->en_scaling && scaler)
869 		dflow->en_split = !in_range(&scaler->hsize, dflow->in_w) ||
870 				  !in_range(&scaler->hsize, dflow->out_w);
871 }
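/* For illustration (hypothetical sizes): a 90-degree rotated 640x360 source
 * displayed at 1080x1920 gives w/h = 360x640 after the swap above, so
 * en_scaling is set (360 != 1080), and since 1080 >= 2 * 360 the image
 * enhancer is enabled as well.
 */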
872 
873 static bool merger_is_available(struct komeda_pipeline *pipe,
874 				struct komeda_data_flow_cfg *dflow)
875 {
876 	u32 avail_inputs = pipe->merger ?
877 			   pipe->merger->base.supported_inputs : 0;
878 
879 	return has_bit(dflow->input.component->id, avail_inputs);
880 }
881 
882 int komeda_build_layer_data_flow(struct komeda_layer *layer,
883 				 struct komeda_plane_state *kplane_st,
884 				 struct komeda_crtc_state *kcrtc_st,
885 				 struct komeda_data_flow_cfg *dflow)
886 {
887 	struct drm_plane *plane = kplane_st->base.plane;
888 	struct komeda_pipeline *pipe = layer->base.pipeline;
889 	int err;
890 
891 	DRM_DEBUG_ATOMIC("%s handling [PLANE:%d:%s]: src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]",
892 			 layer->base.name, plane->base.id, plane->name,
893 			 dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h,
894 			 dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h);
895 
896 	err = komeda_layer_validate(layer, kplane_st, dflow);
897 	if (err)
898 		return err;
899 
900 	err = komeda_scaler_validate(plane, kcrtc_st, dflow);
901 	if (err)
902 		return err;
903 
904 	/* if split, check whether the data flow can be put into the merger */
905 	if (dflow->en_split && merger_is_available(pipe, dflow))
906 		return 0;
907 
908 	err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow);
909 
910 	return err;
911 }
912 
913 /*
914  * Split is introduced to work around the scaler's input/output size
915  * limitation. The idea is simple: if one scaler cannot fit the requirement,
916  * use two. Split divides the big source image into two halves (left/right)
917  * and does the scaling with two scalers separately and independently.
918  * But split also introduces an edge problem in the middle of the image when
919  * scaling; to avoid it, split isn't a simple half-and-half, but adds extra
920  * pixels (overlap) to both sides. After split the left/right will be:
921  * - left: [0, src_length/2 + overlap]
922  * - right: [src_length/2 - overlap, src_length]
923  * The extra overlap eliminates the edge problem, but it may also generate
924  * unnecessary pixels when scaling; we need to crop them before the scaler
925  * outputs the result to the next stage. How to crop depends on where the
926  * unneeded (overlap) pixels were added:
927  * - left: crop the right
928  * - right: crop the left
929  *
930  * The diagram for how to do the split
931  *
932  *  <---------------------left->out_w ---------------->
933  * |--------------------------------|---right_crop-----| <- left after split
934  *  \                                \                /
935  *   \                                \<--overlap--->/
936  *   |-----------------|-------------|(Middle)------|-----------------| <- src
937  *                     /<---overlap--->\                               \
938  *                    /                 \                               \
939  * right after split->|-----left_crop---|--------------------------------|
940  *                    ^<------------------- right->out_w --------------->^
941  *
942  * NOTE: To be consistent with the HW, the output_w always contains the crop size.
943  */
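/* A worked example with made-up numbers: src_length = 4096 and overlap = 32
 * split into left [0, 2080] and right [2016, 4096]; after scaling, the extra
 * pixels produced by the overlap become right_crop on the left flow and
 * left_crop on the right flow, and out_w keeps including them.
 */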
944 
945 static void komeda_split_data_flow(struct komeda_scaler *scaler,
946 				   struct komeda_data_flow_cfg *dflow,
947 				   struct komeda_data_flow_cfg *l_dflow,
948 				   struct komeda_data_flow_cfg *r_dflow)
949 {
950 	bool r90 = drm_rotation_90_or_270(dflow->rot);
951 	bool flip_h = has_flip_h(dflow->rot);
952 	u32 l_out, r_out, overlap;
953 
954 	memcpy(l_dflow, dflow, sizeof(*dflow));
955 	memcpy(r_dflow, dflow, sizeof(*dflow));
956 
957 	l_dflow->right_part = false;
958 	r_dflow->right_part = true;
959 	r_dflow->blending_zorder = dflow->blending_zorder + 1;
960 
961 	overlap = 0;
962 	if (dflow->en_scaling && scaler)
963 		overlap += scaler->scaling_split_overlap;
964 
965 	/* the original dflow may be fed into a splitter, which doesn't need
966 	 * the enhancement overlap
967 	 */
968 	dflow->overlap = overlap;
969 
970 	if (dflow->en_img_enhancement && scaler)
971 		overlap += scaler->enh_split_overlap;
972 
973 	l_dflow->overlap = overlap;
974 	r_dflow->overlap = overlap;
975 
976 	/* split the original content */
977 	/* left/right here always means the left/right part of the display
978 	 * image, not the source image
979 	 */
980 	/* DRM rotation is anti-clockwise */
981 	if (r90) {
982 		if (dflow->en_scaling) {
983 			l_dflow->in_h = ALIGN(dflow->in_h, 2) / 2 + l_dflow->overlap;
984 			r_dflow->in_h = l_dflow->in_h;
985 		} else if (dflow->en_img_enhancement) {
986 			/* enhancer only */
987 			l_dflow->in_h = ALIGN(dflow->in_h, 2) / 2 + l_dflow->overlap;
988 			r_dflow->in_h = dflow->in_h / 2 + r_dflow->overlap;
989 		} else {
990 			/* split without scaler, no overlap */
991 			l_dflow->in_h = ALIGN(((dflow->in_h + 1) >> 1), 2);
992 			r_dflow->in_h = dflow->in_h - l_dflow->in_h;
993 		}
994 
995 		/* Consider the YUV formats: after split, the split source w/h
996 		 * may not be aligned to 2. We have two choices for such a case:
997 		 * 1. scaler is enabled (overlap != 0): do the alignment on
998 		 *    both left/right and let the scaler crop the extra data.
999 		 * 2. scaler is not enabled: only align the split left
1000 		 *    src/disp, and assign the rest to the right.
1001 		 */
1002 		if ((overlap != 0) && dflow->is_yuv) {
1003 			l_dflow->in_h = ALIGN(l_dflow->in_h, 2);
1004 			r_dflow->in_h = ALIGN(r_dflow->in_h, 2);
1005 		}
1006 
1007 		if (flip_h)
1008 			l_dflow->in_y = dflow->in_y + dflow->in_h - l_dflow->in_h;
1009 		else
1010 			r_dflow->in_y = dflow->in_y + dflow->in_h - r_dflow->in_h;
1011 	} else {
1012 		if (dflow->en_scaling) {
1013 			l_dflow->in_w = ALIGN(dflow->in_w, 2) / 2 + l_dflow->overlap;
1014 			r_dflow->in_w = l_dflow->in_w;
1015 		} else if (dflow->en_img_enhancement) {
1016 			l_dflow->in_w = ALIGN(dflow->in_w, 2) / 2 + l_dflow->overlap;
1017 			r_dflow->in_w = dflow->in_w / 2 + r_dflow->overlap;
1018 		} else {
1019 			l_dflow->in_w = ALIGN(((dflow->in_w + 1) >> 1), 2);
1020 			r_dflow->in_w = dflow->in_w - l_dflow->in_w;
1021 		}
1022 
1023 		/* do YUV alignment when scaler enabled */
1024 		if ((overlap != 0) && dflow->is_yuv) {
1025 			l_dflow->in_w = ALIGN(l_dflow->in_w, 2);
1026 			r_dflow->in_w = ALIGN(r_dflow->in_w, 2);
1027 		}
1028 
1029 		/* on flip_h, the left display content comes from the right source */
1030 		if (flip_h)
1031 			l_dflow->in_x = dflow->in_w + dflow->in_x - l_dflow->in_w;
1032 		else
1033 			r_dflow->in_x = dflow->in_w + dflow->in_x - r_dflow->in_w;
1034 	}
1035 
1036 	/* split the disp_rect */
1037 	if (dflow->en_scaling || dflow->en_img_enhancement)
1038 		l_dflow->out_w = ((dflow->out_w + 1) >> 1);
1039 	else
1040 		l_dflow->out_w = ALIGN(((dflow->out_w + 1) >> 1), 2);
1041 
1042 	r_dflow->out_w = dflow->out_w - l_dflow->out_w;
1043 
1044 	l_dflow->out_x = dflow->out_x;
1045 	r_dflow->out_x = l_dflow->out_w + l_dflow->out_x;
1046 
1047 	/* calculate the scaling crop */
1048 	/* the left/right scalers output more data and then crop it */
1049 	if (r90) {
1050 		l_out = (dflow->out_w * l_dflow->in_h) / dflow->in_h;
1051 		r_out = (dflow->out_w * r_dflow->in_h) / dflow->in_h;
1052 	} else {
1053 		l_out = (dflow->out_w * l_dflow->in_w) / dflow->in_w;
1054 		r_out = (dflow->out_w * r_dflow->in_w) / dflow->in_w;
1055 	}
1056 
1057 	l_dflow->left_crop  = 0;
1058 	l_dflow->right_crop = l_out - l_dflow->out_w;
1059 	r_dflow->left_crop  = r_out - r_dflow->out_w;
1060 	r_dflow->right_crop = 0;
1061 
1062 	/* out_w includes the crop length */
1063 	l_dflow->out_w += l_dflow->right_crop + l_dflow->left_crop;
1064 	r_dflow->out_w += r_dflow->right_crop + r_dflow->left_crop;
1065 }
1066 
1067 /* For layer split, a plane state will be split into two data flows and
1068  * handled by two separate komeda layer input pipelines. komeda supports two
1069  * types of layer split:
1070  * - non-scaling split:
1071  *             / layer-left -> \
1072  * plane_state                  compiz-> ...
1073  *             \ layer-right-> /
1074  *
1075  * - scaling split:
1076  *             / layer-left -> scaler->\
1077  * plane_state                          merger -> compiz-> ...
1078  *             \ layer-right-> scaler->/
1079  *
1080  * Since the merger only supports scalers as input, for non-scaling split the
1081  * two layer data flows are output to compiz directly. For scaling split, the
1082  * two data flows are first merged by the merger, which then outputs one
1083  * merged data flow to compiz.
1084  */
1085 int komeda_build_layer_split_data_flow(struct komeda_layer *left,
1086 				       struct komeda_plane_state *kplane_st,
1087 				       struct komeda_crtc_state *kcrtc_st,
1088 				       struct komeda_data_flow_cfg *dflow)
1089 {
1090 	struct drm_plane *plane = kplane_st->base.plane;
1091 	struct komeda_pipeline *pipe = left->base.pipeline;
1092 	struct komeda_layer *right = left->right;
1093 	struct komeda_data_flow_cfg l_dflow, r_dflow;
1094 	int err;
1095 
1096 	komeda_split_data_flow(pipe->scalers[0], dflow, &l_dflow, &r_dflow);
1097 
1098 	DRM_DEBUG_ATOMIC("Assign %s + %s to [PLANE:%d:%s]: "
1099 			 "src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]",
1100 			 left->base.name, right->base.name,
1101 			 plane->base.id, plane->name,
1102 			 dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h,
1103 			 dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h);
1104 
1105 	err = komeda_build_layer_data_flow(left, kplane_st, kcrtc_st, &l_dflow);
1106 	if (err)
1107 		return err;
1108 
1109 	err = komeda_build_layer_data_flow(right, kplane_st, kcrtc_st, &r_dflow);
1110 	if (err)
1111 		return err;
1112 
1113 	/* The rotation has been handled by the layers, so adjust the data flow */
1114 	komeda_rotate_data_flow(dflow, dflow->rot);
1115 
1116 	/* the left and right dflows have already been fed into compiz,
1117 	 * no need for the merger to merge them anymore.
1118 	 */
1119 	if (r_dflow.input.component == l_dflow.input.component)
1120 		return 0;
1121 
1122 	/* line merger path */
1123 	err = komeda_merger_validate(pipe->merger, plane, kcrtc_st,
1124 				     &l_dflow, &r_dflow, dflow);
1125 	if (err)
1126 		return err;
1127 
1128 	err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow);
1129 
1130 	return err;
1131 }
1132 
1133 /* writeback data path: compiz -> scaler -> wb_layer -> memory */
1134 int komeda_build_wb_data_flow(struct komeda_layer *wb_layer,
1135 			      struct drm_connector_state *conn_st,
1136 			      struct komeda_crtc_state *kcrtc_st,
1137 			      struct komeda_data_flow_cfg *dflow)
1138 {
1139 	struct drm_connector *conn = conn_st->connector;
1140 	int err;
1141 
1142 	err = komeda_scaler_validate(conn, kcrtc_st, dflow);
1143 	if (err)
1144 		return err;
1145 
1146 	return komeda_wb_layer_validate(wb_layer, conn_st, dflow);
1147 }
1148 
1149 /* writeback scaling split data path:
1150  *                   /-> scaler ->\
1151  * compiz -> splitter              merger -> wb_layer -> memory
1152  *                   \-> scaler ->/
1153  */
1154 int komeda_build_wb_split_data_flow(struct komeda_layer *wb_layer,
1155 				    struct drm_connector_state *conn_st,
1156 				    struct komeda_crtc_state *kcrtc_st,
1157 				    struct komeda_data_flow_cfg *dflow)
1158 {
1159 	struct komeda_pipeline *pipe = wb_layer->base.pipeline;
1160 	struct drm_connector *conn = conn_st->connector;
1161 	struct komeda_data_flow_cfg l_dflow, r_dflow;
1162 	int err;
1163 
1164 	err = komeda_splitter_validate(pipe->splitter, conn_st,
1165 				       dflow, &l_dflow, &r_dflow);
1166 	if (err)
1167 		return err;
1168 	err = komeda_scaler_validate(conn, kcrtc_st, &l_dflow);
1169 	if (err)
1170 		return err;
1171 
1172 	err = komeda_scaler_validate(conn, kcrtc_st, &r_dflow);
1173 	if (err)
1174 		return err;
1175 
1176 	err = komeda_merger_validate(pipe->merger, conn_st, kcrtc_st,
1177 				     &l_dflow, &r_dflow, dflow);
1178 	if (err)
1179 		return err;
1180 
1181 	return komeda_wb_layer_validate(wb_layer, conn_st, dflow);
1182 }
1183 
1184 /* build display output data flow, the data path is:
1185  * compiz -> improc -> timing_ctrlr
1186  */
1187 int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
1188 				   struct komeda_crtc_state *kcrtc_st)
1189 {
1190 	struct komeda_pipeline *master = kcrtc->master;
1191 	struct komeda_pipeline *slave  = kcrtc->slave;
1192 	struct komeda_data_flow_cfg m_dflow; /* master data flow */
1193 	struct komeda_data_flow_cfg s_dflow; /* slave data flow */
1194 	int err;
1195 
1196 	memset(&m_dflow, 0, sizeof(m_dflow));
1197 	memset(&s_dflow, 0, sizeof(s_dflow));
1198 
1199 	if (slave && has_bit(slave->id, kcrtc_st->active_pipes)) {
1200 		err = komeda_compiz_validate(slave->compiz, kcrtc_st, &s_dflow);
1201 		if (err)
1202 			return err;
1203 
1204 		/* merge the slave dflow into master pipeline */
1205 		err = komeda_compiz_set_input(master->compiz, kcrtc_st,
1206 					      &s_dflow);
1207 		if (err)
1208 			return err;
1209 	}
1210 
1211 	err = komeda_compiz_validate(master->compiz, kcrtc_st, &m_dflow);
1212 	if (err)
1213 		return err;
1214 
1215 	err = komeda_improc_validate(master->improc, kcrtc_st, &m_dflow);
1216 	if (err)
1217 		return err;
1218 
1219 	err = komeda_timing_ctrlr_validate(master->ctrlr, kcrtc_st, &m_dflow);
1220 	if (err)
1221 		return err;
1222 
1223 	return 0;
1224 }
1225 
1226 static int
1227 komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
1228 				   struct komeda_pipeline_state *new)
1229 {
1230 	struct drm_atomic_state *drm_st = new->obj.state;
1231 	struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
1232 	struct komeda_component_state *c_st;
1233 	struct komeda_component *c;
1234 	u32 id;
1235 	unsigned long disabling_comps;
1236 
1237 	WARN_ON(!old);
1238 
1239 	disabling_comps = (~new->active_comps) & old->active_comps;
1240 
1241 	/* unbind all disabling components */
1242 	for_each_set_bit(id, &disabling_comps, 32) {
1243 		c = komeda_pipeline_get_component(pipe, id);
1244 		c_st = komeda_component_get_state_and_set_user(c,
1245 				drm_st, NULL, new->crtc);
1246 		if (PTR_ERR(c_st) == -EDEADLK)
1247 			return -EDEADLK;
1248 		WARN_ON(IS_ERR(c_st));
1249 	}
1250 
1251 	return 0;
1252 }
1253 
1254 /* release unclaimed pipeline resources */
1255 int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
1256 				       struct komeda_crtc_state *kcrtc_st)
1257 {
1258 	struct drm_atomic_state *drm_st = kcrtc_st->base.state;
1259 	struct komeda_pipeline_state *st;
1260 
1261 	/* ignore the pipeline which is not affected */
1262 	if (!pipe || !has_bit(pipe->id, kcrtc_st->affected_pipes))
1263 		return 0;
1264 
1265 	if (has_bit(pipe->id, kcrtc_st->active_pipes))
1266 		st = komeda_pipeline_get_new_state(pipe, drm_st);
1267 	else
1268 		st = komeda_pipeline_get_state_and_set_crtc(pipe, drm_st, NULL);
1269 
1270 	if (WARN_ON(IS_ERR_OR_NULL(st)))
1271 		return -EINVAL;
1272 
1273 	return komeda_pipeline_unbound_components(pipe, st);
1274 
1275 }
1276 
1277 /* Since standalone-disabled components must be disabled separately and last,
1278  * a complete disable operation may need to call pipeline_disable twice
1279  * (two-phase disabling).
1280  * Phase 1: disable the common components, flush it.
1281  * Phase 2: disable the standalone-disabled components, flush it.
1282  *
1283  * RETURNS:
1284  * true: disable is not complete, a phase 2 disable is needed.
1285  * false: disable is complete.
1286  */
1287 bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
1288 			     struct drm_atomic_state *old_state)
1289 {
1290 	struct komeda_pipeline_state *old;
1291 	struct komeda_component *c;
1292 	struct komeda_component_state *c_st;
1293 	u32 id;
1294 	unsigned long disabling_comps;
1295 
1296 	old = komeda_pipeline_get_old_state(pipe, old_state);
1297 
1298 	disabling_comps = old->active_comps &
1299 			  (~pipe->standalone_disabled_comps);
1300 	if (!disabling_comps)
1301 		disabling_comps = old->active_comps &
1302 				  pipe->standalone_disabled_comps;
1303 
1304 	DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%lx.\n",
1305 			 pipe->id, old->active_comps, disabling_comps);
1306 
1307 	for_each_set_bit(id, &disabling_comps, 32) {
1308 		c = komeda_pipeline_get_component(pipe, id);
1309 		c_st = priv_to_comp_st(c->obj.state);
1310 
1311 		/*
1312 		 * If we disabled a component then all active_inputs should be
1313 		 * put in the list of changed_active_inputs, so they get
1314 		 * re-enabled.
1315 		 * This usually happens during a modeset when the pipeline is
1316 		 * first disabled and then the actual state gets committed
1317 		 * again.
1318 		 */
1319 		c_st->changed_active_inputs |= c_st->active_inputs;
1320 
1321 		c->funcs->disable(c);
1322 	}
1323 
1324 	/* Update the pipeline state; if there are components that are still
1325 	 * active, return true so that the phase 2 disable gets called.
1326 	 */
1327 	old->active_comps &= ~disabling_comps;
1328 
1329 	return old->active_comps ? true : false;
1330 }
1331 
1332 void komeda_pipeline_update(struct komeda_pipeline *pipe,
1333 			    struct drm_atomic_state *old_state)
1334 {
1335 	struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
1336 	struct komeda_pipeline_state *old;
1337 	struct komeda_component *c;
1338 	u32 id;
1339 	unsigned long changed_comps;
1340 
1341 	old = komeda_pipeline_get_old_state(pipe, old_state);
1342 
1343 	changed_comps = new->active_comps | old->active_comps;
1344 
1345 	DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%lx.\n",
1346 			 pipe->id, new->active_comps, changed_comps);
1347 
1348 	for_each_set_bit(id, &changed_comps, 32) {
1349 		c = komeda_pipeline_get_component(pipe, id);
1350 
1351 		if (new->active_comps & BIT(c->id))
1352 			c->funcs->update(c, priv_to_comp_st(c->obj.state));
1353 		else
1354 			c->funcs->disable(c);
1355 	}
1356 }
1357