// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"

#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
		(e) && (e)->base.parent ? \
		(e)->base.parent->base.id : -1, \
		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)

#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
		(e) && (e)->base.parent ? \
		(e)->base.parent->base.id : -1, \
		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)

#define to_dpu_encoder_phys_cmd(x) \
	container_of(x, struct dpu_encoder_phys_cmd, base)

#define PP_TIMEOUT_MAX_TRIALS	10

/*
 * Tearcheck sync start and continue thresholds are empirically found
 * based on common panels. In the future, we may want to allow panels to
 * override these default values.
 */
#define DEFAULT_TEARCHECK_SYNC_THRESH_START	4
#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE	4

#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000

static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
	return phys_enc->split_role != ENC_ROLE_SLAVE;
}

static bool dpu_encoder_phys_cmd_mode_fixup(
		struct dpu_encoder_phys *phys_enc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adj_mode)
{
	if (phys_enc)
		DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
	return true;
}

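/*
 * Program the CTL block to drive this interface in command mode, using
 * the stream and 3D blend mode currently selected for this encoder.
 */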
static void _dpu_encoder_phys_cmd_update_intf_cfg(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_intf_cfg intf_cfg = { 0 };

	if (!phys_enc)
		return;

	ctl = phys_enc->hw_ctl;
	if (!ctl || !ctl->ops.setup_intf_cfg)
		return;

	intf_cfg.intf = phys_enc->intf_idx;
	intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
	intf_cfg.stream_sel = cmd_enc->stream_sel;
	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
	ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
}

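/*
 * PING-PONG done interrupt: the frame has been transferred to the panel.
 * Notify the parent encoder, drop one pending kickoff and wake up any
 * thread waiting in prepare_for_kickoff / wait_for_tx_complete.
 */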
static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;
	unsigned long lock_flags;
	int new_cnt;
	u32 event = DPU_ENCODER_FRAME_EVENT_DONE;

	if (!phys_enc || !phys_enc->hw_pp)
		return;

	DPU_ATRACE_BEGIN("pp_done_irq");
	/* notify all synchronous clients first, then asynchronous clients */
	if (phys_enc->parent_ops->handle_frame_done)
		phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
				phys_enc, event);

	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);

	trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
					  phys_enc->hw_pp->idx - PINGPONG_0,
					  new_cnt, event);

	/* Signal any waiting atomic commit thread */
	wake_up_all(&phys_enc->pending_kickoff_wq);
	DPU_ATRACE_END("pp_done_irq");
}

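/*
 * PING-PONG read-pointer interrupt: fires once per panel refresh and is
 * the vblank source in command mode, so forward it as a vblank event
 * and release any wait_for_vblank waiters.
 */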
static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;
	struct dpu_encoder_phys_cmd *cmd_enc;

	if (!phys_enc || !phys_enc->hw_pp)
		return;

	DPU_ATRACE_BEGIN("rd_ptr_irq");
	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);

	if (phys_enc->parent_ops->handle_vblank_virt)
		phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
				phys_enc);

	atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
	wake_up_all(&cmd_enc->pending_vblank_wq);
	DPU_ATRACE_END("rd_ptr_irq");
}

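/*
 * CTL_START interrupt: the CTL has accepted the start request for the
 * queued frame. Drop one pending ctl-start count and wake up any
 * wait_for_ctl_start waiters.
 */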
static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;

	if (!phys_enc || !phys_enc->hw_ctl)
		return;

	DPU_ATRACE_BEGIN("ctl_start_irq");

	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);

	/* Signal any waiting ctl start interrupt */
	wake_up_all(&phys_enc->pending_kickoff_wq);
	DPU_ATRACE_END("ctl_start_irq");
}

static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;

	if (!phys_enc)
		return;

	if (phys_enc->parent_ops->handle_underrun_virt)
		phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
			phys_enc);
}

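/*
 * Bind each logical interrupt to the hardware block it observes:
 * CTL_START to the CTL, pp-done and rd-ptr to the PINGPONG, underrun to
 * the interface. The irq_idx values are resolved later, when the
 * interrupts are actually registered.
 */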
static void _dpu_encoder_phys_cmd_setup_irq_hw_idx(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_irq *irq;

	irq = &phys_enc->irq[INTR_IDX_CTL_START];
	irq->hw_idx = phys_enc->hw_ctl->idx;
	irq->irq_idx = -EINVAL;

	irq = &phys_enc->irq[INTR_IDX_PINGPONG];
	irq->hw_idx = phys_enc->hw_pp->idx;
	irq->irq_idx = -EINVAL;

	irq = &phys_enc->irq[INTR_IDX_RDPTR];
	irq->hw_idx = phys_enc->hw_pp->idx;
	irq->irq_idx = -EINVAL;

	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
	irq->hw_idx = phys_enc->intf_idx;
	irq->irq_idx = -EINVAL;
}

static void dpu_encoder_phys_cmd_mode_set(
		struct dpu_encoder_phys *phys_enc,
		struct drm_display_mode *mode,
		struct drm_display_mode *adj_mode)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc || !mode || !adj_mode) {
		DPU_ERROR("invalid args\n");
		return;
	}
	phys_enc->cached_mode = *adj_mode;
	DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
	drm_mode_debug_printmodeline(adj_mode);

	_dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
}

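/*
 * Handle a pp-done timeout: count consecutive failures, log the first
 * occurrence and the point at which the panel is declared dead, release
 * the stuck kickoff and request a CTL reset before the next frame.
 */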
static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
	bool do_log = false;

	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
		return -EINVAL;

	cmd_enc->pp_timeout_report_cnt++;
	if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
		frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
		do_log = true;
	} else if (cmd_enc->pp_timeout_report_cnt == 1) {
		do_log = true;
	}

	trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(phys_enc->parent),
		     phys_enc->hw_pp->idx - PINGPONG_0,
		     cmd_enc->pp_timeout_report_cnt,
		     atomic_read(&phys_enc->pending_kickoff_cnt),
		     frame_event);

	/* to avoid flooding, only log first time, and "dead" time */
	if (do_log) {
		DRM_ERROR("id:%d pp:%d ctl:%d kickoff timeout cnt:%d koff_cnt:%d\n",
			  DRMID(phys_enc->parent),
			  phys_enc->hw_pp->idx - PINGPONG_0,
			  phys_enc->hw_ctl->idx - CTL_0,
			  cmd_enc->pp_timeout_report_cnt,
			  atomic_read(&phys_enc->pending_kickoff_cnt));

		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
	}

	atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);

	/* request a ctl reset before the next kickoff */
	phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;

	if (phys_enc->parent_ops->handle_frame_done)
		phys_enc->parent_ops->handle_frame_done(
				phys_enc->parent, phys_enc, frame_event);

	return -ETIMEDOUT;
}

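/*
 * Block until all previously queued kickoffs have signalled pp-done, or
 * escalate to the timeout handler if the pending count does not drain
 * within KICKOFF_TIMEOUT_MS.
 */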
static int _dpu_encoder_phys_cmd_wait_for_idle(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_encoder_wait_info wait_info;
	int ret;

	if (!phys_enc) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
			&wait_info);
	if (ret == -ETIMEDOUT)
		_dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
	else if (!ret)
		cmd_enc->pp_timeout_report_cnt = 0;

	return ret;
}

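/*
 * Reference-counted enable/disable of the rd-ptr interrupt that backs
 * vblank reporting. Only the master encoder of a split pair registers
 * it: the hardware IRQ is hooked on the 0 -> 1 transition and released
 * on 1 -> 0.
 */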
static int dpu_encoder_phys_cmd_control_vblank_irq(
		struct dpu_encoder_phys *phys_enc,
		bool enable)
{
	int ret = 0;
	int refcount;

	if (!phys_enc || !phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}

	refcount = atomic_read(&phys_enc->vblank_refcount);

	/* Slave encoders don't report vblank */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		goto end;

	/* protect against negative */
	if (!enable && refcount == 0) {
		ret = -EINVAL;
		goto end;
	}

	DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      enable ? "true" : "false", refcount);

	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
		ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
		ret = dpu_encoder_helper_unregister_irq(phys_enc,
				INTR_IDX_RDPTR);

end:
	if (ret) {
		DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
			  DRMID(phys_enc->parent),
			  phys_enc->hw_pp->idx - PINGPONG_0, ret,
			  enable ? "true" : "false", refcount);
	}

	return ret;
}

static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
		bool enable)
{
	if (!phys_enc)
		return;

	trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
			phys_enc->hw_pp->idx - PINGPONG_0,
			enable, atomic_read(&phys_enc->vblank_refcount));

	if (enable) {
		dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
		dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
		dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);

		if (dpu_encoder_phys_cmd_is_master(phys_enc))
			dpu_encoder_helper_register_irq(phys_enc,
					INTR_IDX_CTL_START);
	} else {
		if (dpu_encoder_phys_cmd_is_master(phys_enc))
			dpu_encoder_helper_unregister_irq(phys_enc,
					INTR_IDX_CTL_START);

		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
		dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
	}
}

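/*
 * Program the pingpong tearcheck block: the sync counter is derived
 * from the "vsync" clock and the cached mode, external TE is connected
 * only after kickoff, and the hardware-generated TE is effectively
 * parked by the large sync_cfg_height value.
 */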
static void dpu_encoder_phys_cmd_tearcheck_config(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_tear_check tc_cfg = { 0 };
	struct drm_display_mode *mode;
	bool tc_enable = true;
	u32 vsync_hz;
	struct dpu_kms *dpu_kms;

	if (!phys_enc || !phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	mode = &phys_enc->cached_mode;

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);

	if (!phys_enc->hw_pp->ops.setup_tearcheck ||
		!phys_enc->hw_pp->ops.enable_tearcheck) {
		DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
		return;
	}

	dpu_kms = phys_enc->dpu_kms;
	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
		DPU_ERROR("invalid device\n");
		return;
	}

	/*
	 * TE default: DSI byte clock calculated based on 70 fps;
	 * around 14 ms to complete a kickoff cycle if TE is disabled;
	 * vclk_line based on 60 fps; write is faster than read;
	 * init == start == rdptr.
	 *
	 * vsync_count is the ratio of the MDP VSYNC clock frequency to
	 * the LCD panel refresh rate divided by the number of rows
	 * (lines) in the LCD panel.
	 */
	vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
	if (!vsync_hz || !mode->vtotal || !drm_mode_vrefresh(mode)) {
		/* guard the division below against a zero divisor */
		DPU_DEBUG_CMDENC(cmd_enc,
			"invalid params - vsync_hz %u vtotal %d vrefresh %d\n",
			vsync_hz, mode->vtotal, drm_mode_vrefresh(mode));
		return;
	}

	tc_cfg.vsync_count = vsync_hz /
				(mode->vtotal * drm_mode_vrefresh(mode));

	/* enable external TE after kickoff to avoid premature autorefresh */
	tc_cfg.hw_vsync_mode = 0;

	/*
	 * By setting sync_cfg_height to near max register value, we essentially
	 * disable dpu hw generated TE signal, since hw TE will arrive first.
	 * Only caveat is if due to error, we hit wrap-around.
	 */
	tc_cfg.sync_cfg_height = 0xFFF0;
	tc_cfg.vsync_init_val = mode->vdisplay;
	tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
	tc_cfg.start_pos = mode->vdisplay;
	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;

	DPU_DEBUG_CMDENC(cmd_enc,
		"tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
		phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
		mode->vtotal, drm_mode_vrefresh(mode));
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc %d enable %u start_pos %u rd_ptr_irq %u\n",
		phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
		tc_cfg.rd_ptr_irq);
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
		tc_cfg.vsync_count, tc_cfg.vsync_init_val);
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
		tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);

	phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
	phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
}

static void _dpu_encoder_phys_cmd_pingpong_config(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
			|| !phys_enc->hw_ctl->ops.setup_intf_cfg) {
		DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
			phys_enc->hw_pp->idx - PINGPONG_0);
	drm_mode_debug_printmodeline(&phys_enc->cached_mode);

	_dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
	dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
}

static bool dpu_encoder_phys_cmd_needs_single_flush(
		struct dpu_encoder_phys *phys_enc)
{
	/*
	 * we do separate flush for each CTL and let
	 * CTL_START synchronize them
	 */
	return false;
}

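/*
 * Common enable path, also installed as the restore hook after idle
 * power-collapse: program split config, pingpong and tearcheck, then
 * let only the master queue the interface flush so that CTL_START can
 * synchronize both CTLs.
 */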
static void dpu_encoder_phys_cmd_enable_helper(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;
	u32 flush_mask = 0;

	if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
		return;
	}

	dpu_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);

	_dpu_encoder_phys_cmd_pingpong_config(phys_enc);

	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return;

	ctl = phys_enc->hw_ctl;
	ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
	ctl->ops.update_pending_flush(ctl, flush_mask);
}

static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc || !phys_enc->hw_pp) {
		DPU_ERROR("invalid phys encoder\n");
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);

	if (phys_enc->enable_state == DPU_ENC_ENABLED) {
		DPU_ERROR("already enabled\n");
		return;
	}

	dpu_encoder_phys_cmd_enable_helper(phys_enc);
	phys_enc->enable_state = DPU_ENC_ENABLED;
}

static void _dpu_encoder_phys_cmd_connect_te(
		struct dpu_encoder_phys *phys_enc, bool enable)
{
	if (!phys_enc || !phys_enc->hw_pp ||
			!phys_enc->hw_pp->ops.connect_external_te)
		return;

	trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
	phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
}

static void dpu_encoder_phys_cmd_prepare_idle_pc(
		struct dpu_encoder_phys *phys_enc)
{
	_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
}

static int dpu_encoder_phys_cmd_get_line_count(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_pingpong *hw_pp;

	if (!phys_enc || !phys_enc->hw_pp)
		return -EINVAL;

	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return -EINVAL;

	hw_pp = phys_enc->hw_pp;
	if (!hw_pp->ops.get_line_count)
		return -EINVAL;

	return hw_pp->ops.get_line_count(hw_pp);
}

static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc || !phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      phys_enc->enable_state);

	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
		return;
	}

	if (phys_enc->hw_pp->ops.enable_tearcheck)
		phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
	phys_enc->enable_state = DPU_ENC_DISABLED;
}

static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	kfree(cmd_enc);
}

static void dpu_encoder_phys_cmd_get_hw_resources(
		struct dpu_encoder_phys *phys_enc,
		struct dpu_encoder_hw_resources *hw_res)
{
	hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
}

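/*
 * Called before a new frame is kicked off: if a previous kickoff is
 * still outstanding, wait for its pp-done before queueing another, and
 * discard the stuck kickoff on timeout so the pipeline can recover.
 */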
static void dpu_encoder_phys_cmd_prepare_for_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	int ret;

	if (!phys_enc || !phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      atomic_read(&phys_enc->pending_kickoff_cnt));

	/*
	 * Mark the kickoff request as outstanding. If there is more than
	 * one outstanding, we have to wait for the previous one to complete.
	 */
	ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
	if (ret) {
		/* force pending_kickoff_cnt 0 to discard failed kickoff */
		atomic_set(&phys_enc->pending_kickoff_cnt, 0);
		DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
			  DRMID(phys_enc->parent), ret,
			  phys_enc->hw_pp->idx - PINGPONG_0);
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
			phys_enc->hw_pp->idx - PINGPONG_0,
			atomic_read(&phys_enc->pending_kickoff_cnt));
}

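/*
 * Wait for the CTL_START interrupt, which confirms the hardware has
 * accepted the start of the current frame. Only meaningful on the
 * master encoder.
 */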
static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_encoder_wait_info wait_info;
	int ret;

	if (!phys_enc || !phys_enc->hw_ctl) {
		DPU_ERROR("invalid argument(s)\n");
		return -EINVAL;
	}

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
			&wait_info);
	if (ret == -ETIMEDOUT) {
		DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
		ret = -EINVAL;
	}

	return ret;
}

static int dpu_encoder_phys_cmd_wait_for_tx_complete(
		struct dpu_encoder_phys *phys_enc)
{
	int rc;

	if (!phys_enc)
		return -EINVAL;

	rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
	if (rc) {
		DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
			  DRMID(phys_enc->parent), rc,
			  phys_enc->intf_idx - INTF_0);
	}

	return rc;
}

static int dpu_encoder_phys_cmd_wait_for_commit_done(
		struct dpu_encoder_phys *phys_enc)
{
	int rc = 0;
	struct dpu_encoder_phys_cmd *cmd_enc;

	if (!phys_enc)
		return -EINVAL;

	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);

	/* only required for master controller */
	if (dpu_encoder_phys_cmd_is_master(phys_enc))
		rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);

	/* required for both controllers */
	if (!rc && cmd_enc->serialize_wait4pp)
		dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc);

	return rc;
}

static int dpu_encoder_phys_cmd_wait_for_vblank(
		struct dpu_encoder_phys *phys_enc)
{
	int rc = 0;
	struct dpu_encoder_phys_cmd *cmd_enc;
	struct dpu_encoder_wait_info wait_info;

	if (!phys_enc)
		return -EINVAL;

	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);

	/* only required for master controller */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return rc;

	wait_info.wq = &cmd_enc->pending_vblank_wq;
	wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	atomic_inc(&cmd_enc->pending_vblank_cnt);

	rc = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_RDPTR,
			&wait_info);

	return rc;
}

static void dpu_encoder_phys_cmd_handle_post_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	/*
	 * re-enable external TE, either for the first time after enabling
	 * or if disabled for Autorefresh
	 */
	_dpu_encoder_phys_cmd_connect_te(phys_enc, true);
}

static void dpu_encoder_phys_cmd_trigger_start(
		struct dpu_encoder_phys *phys_enc)
{
	if (!phys_enc)
		return;

	dpu_encoder_helper_trigger_start(phys_enc);
}

static void dpu_encoder_phys_cmd_init_ops(
		struct dpu_encoder_phys_ops *ops)
{
	ops->is_master = dpu_encoder_phys_cmd_is_master;
	ops->mode_set = dpu_encoder_phys_cmd_mode_set;
	ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup;
	ops->enable = dpu_encoder_phys_cmd_enable;
	ops->disable = dpu_encoder_phys_cmd_disable;
	ops->destroy = dpu_encoder_phys_cmd_destroy;
	ops->get_hw_resources = dpu_encoder_phys_cmd_get_hw_resources;
	ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
	ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
	ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
	ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
	ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
	ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
	ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
	ops->irq_control = dpu_encoder_phys_cmd_irq_control;
	ops->restore = dpu_encoder_phys_cmd_enable_helper;
	ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
	ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
	ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
}

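/*
 * Allocate and initialize a command-mode physical encoder: wire up the
 * ops table, describe the four interrupts this encoder consumes and
 * reset all pending counters and wait queues. Returns the embedded
 * dpu_encoder_phys, or an ERR_PTR on allocation failure.
 */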
struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
		struct dpu_enc_phys_init_params *p)
{
	struct dpu_encoder_phys *phys_enc = NULL;
	struct dpu_encoder_phys_cmd *cmd_enc = NULL;
	struct dpu_encoder_irq *irq;
	int i;

	DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);

	cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
	if (!cmd_enc) {
		DPU_ERROR("failed to allocate\n");
		return ERR_PTR(-ENOMEM);
	}
	phys_enc = &cmd_enc->base;
	phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
	phys_enc->intf_idx = p->intf_idx;

	dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
	phys_enc->parent = p->parent;
	phys_enc->parent_ops = p->parent_ops;
	phys_enc->dpu_kms = p->dpu_kms;
	phys_enc->split_role = p->split_role;
	phys_enc->intf_mode = INTF_MODE_CMD;
	phys_enc->enc_spinlock = p->enc_spinlock;
	cmd_enc->stream_sel = 0;
	phys_enc->enable_state = DPU_ENC_DISABLED;
	for (i = 0; i < INTR_IDX_MAX; i++) {
		irq = &phys_enc->irq[i];
		INIT_LIST_HEAD(&irq->cb.list);
		irq->irq_idx = -EINVAL;
		irq->hw_idx = -EINVAL;
		irq->cb.arg = phys_enc;
	}

	irq = &phys_enc->irq[INTR_IDX_CTL_START];
	irq->name = "ctl_start";
	irq->intr_type = DPU_IRQ_TYPE_CTL_START;
	irq->intr_idx = INTR_IDX_CTL_START;
	irq->cb.func = dpu_encoder_phys_cmd_ctl_start_irq;

	irq = &phys_enc->irq[INTR_IDX_PINGPONG];
	irq->name = "pp_done";
	irq->intr_type = DPU_IRQ_TYPE_PING_PONG_COMP;
	irq->intr_idx = INTR_IDX_PINGPONG;
	irq->cb.func = dpu_encoder_phys_cmd_pp_tx_done_irq;

	irq = &phys_enc->irq[INTR_IDX_RDPTR];
	irq->name = "pp_rd_ptr";
	irq->intr_type = DPU_IRQ_TYPE_PING_PONG_RD_PTR;
	irq->intr_idx = INTR_IDX_RDPTR;
	irq->cb.func = dpu_encoder_phys_cmd_pp_rd_ptr_irq;

	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
	irq->name = "underrun";
	irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
	irq->intr_idx = INTR_IDX_UNDERRUN;
	irq->cb.func = dpu_encoder_phys_cmd_underrun_irq;

	atomic_set(&phys_enc->vblank_refcount, 0);
	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
	atomic_set(&cmd_enc->pending_vblank_cnt, 0);
	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
	init_waitqueue_head(&cmd_enc->pending_vblank_wq);

	DPU_DEBUG_CMDENC(cmd_enc, "created\n");

	return phys_enc;
}