1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
4 * Copyright (C) 2013 Red Hat
5 * Author: Rob Clark <robdclark@gmail.com>
6 */
7
8 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
9 #include <linux/debugfs.h>
10 #include <linux/kthread.h>
11 #include <linux/seq_file.h>
12
13 #include <drm/drm_crtc.h>
14 #include <drm/drm_file.h>
15 #include <drm/drm_probe_helper.h>
16
17 #include "msm_drv.h"
18 #include "dpu_kms.h"
19 #include "dpu_hwio.h"
20 #include "dpu_hw_catalog.h"
21 #include "dpu_hw_intf.h"
22 #include "dpu_hw_ctl.h"
23 #include "dpu_hw_dspp.h"
24 #include "dpu_formats.h"
25 #include "dpu_encoder_phys.h"
26 #include "dpu_crtc.h"
27 #include "dpu_trace.h"
28 #include "dpu_core_irq.h"
29 #include "disp/msm_disp_snapshot.h"
30
31 #define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
32 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
33
34 #define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
35 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
36
37 #define DPU_DEBUG_PHYS(p, fmt, ...) DRM_DEBUG_ATOMIC("enc%d intf%d pp%d " fmt,\
38 (p) ? (p)->parent->base.id : -1, \
39 (p) ? (p)->intf_idx - INTF_0 : -1, \
40 (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
41 ##__VA_ARGS__)
42
43 #define DPU_ERROR_PHYS(p, fmt, ...) DPU_ERROR("enc%d intf%d pp%d " fmt,\
44 (p) ? (p)->parent->base.id : -1, \
45 (p) ? (p)->intf_idx - INTF_0 : -1, \
46 (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
47 ##__VA_ARGS__)
48
49 #define DPU_ERROR_ENC_RATELIMITED(e, fmt, ...) DPU_ERROR_RATELIMITED("enc%d " fmt,\
50 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
51
/*
* Two to anticipate panels that can do cmd/vid dynamic switching;
* the plan is to create all possible physical encoder types and switch
* between them at runtime
*/
57 #define NUM_PHYS_ENCODER_TYPES 2
58
59 #define MAX_PHYS_ENCODERS_PER_VIRTUAL \
60 (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
61
62 #define MAX_CHANNELS_PER_ENC 2
63
64 #define IDLE_SHORT_TIMEOUT 1
65
66 #define MAX_HDISPLAY_SPLIT 1080
67
68 /* timeout in frames waiting for frame done */
69 #define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
70
71 /**
72 * enum dpu_enc_rc_events - events for resource control state machine
73 * @DPU_ENC_RC_EVENT_KICKOFF:
74 * This event happens at NORMAL priority.
75 * Event that signals the start of the transfer. When this event is
76 * received, enable MDP/DSI core clocks. Regardless of the previous
77 * state, the resource should be in ON state at the end of this event.
78 * @DPU_ENC_RC_EVENT_FRAME_DONE:
79 * This event happens at INTERRUPT level.
80 * Event signals the end of the data transfer after the PP FRAME_DONE
81 * event. At the end of this event, a delayed work is scheduled to go to
82 * IDLE_PC state after IDLE_TIMEOUT time.
83 * @DPU_ENC_RC_EVENT_PRE_STOP:
84 * This event happens at NORMAL priority.
* This event, when received during the ON state, leaves the RC state
86 * in the PRE_OFF state. It should be followed by the STOP event as
87 * part of encoder disable.
88 * If received during IDLE or OFF states, it will do nothing.
89 * @DPU_ENC_RC_EVENT_STOP:
90 * This event happens at NORMAL priority.
91 * When this event is received, disable all the MDP/DSI core clocks, and
92 * disable IRQs. It should be called from the PRE_OFF or IDLE states.
93 * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
94 * PRE_OFF is expected when PRE_STOP was executed during the ON state.
95 * Resource state should be in OFF at the end of the event.
96 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
97 * This event happens at NORMAL priority from a work item.
98 * Event signals that there were no frame updates for IDLE_TIMEOUT time.
99 * This would disable MDP/DSI core clocks and change the resource state
100 * to IDLE.
101 */
102 enum dpu_enc_rc_events {
103 DPU_ENC_RC_EVENT_KICKOFF = 1,
104 DPU_ENC_RC_EVENT_FRAME_DONE,
105 DPU_ENC_RC_EVENT_PRE_STOP,
106 DPU_ENC_RC_EVENT_STOP,
107 DPU_ENC_RC_EVENT_ENTER_IDLE
108 };
109
110 /*
111 * enum dpu_enc_rc_states - states that the resource control maintains
112 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
113 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
114 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
116 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
117 */
118 enum dpu_enc_rc_states {
119 DPU_ENC_RC_STATE_OFF,
120 DPU_ENC_RC_STATE_PRE_OFF,
121 DPU_ENC_RC_STATE_ON,
122 DPU_ENC_RC_STATE_IDLE
123 };
124
125 /**
126 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
127 * encoders. Virtual encoder manages one "logical" display. Physical
128 * encoders manage one intf block, tied to a specific panel/sub-panel.
129 * Virtual encoder defers as much as possible to the physical encoders.
130 * Virtual encoder registers itself with the DRM Framework as the encoder.
131 * @base: drm_encoder base class for registration with DRM
132 * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
133 * @bus_scaling_client: Client handle to the bus scaling interface
134 * @enabled: True if the encoder is active, protected by enc_lock
135 * @num_phys_encs: Actual number of physical encoders contained.
136 * @phys_encs: Container of physical encoders managed.
* @cur_master: Pointer to the current master in this mode. Optimization:
* only valid after enable, cleared at disable.
139 * @cur_slave: As above but for the slave encoder.
* @hw_pp: Handles to the pingpong blocks used for the display. The number of
* pingpong blocks can differ from num_phys_encs.
142 * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
143 * for partial update right-only cases, such as pingpong
144 * split where virtual pingpong does not generate IRQs
145 * @crtc: Pointer to the currently assigned crtc. Normally you
146 * would use crtc->state->encoder_mask to determine the
147 * link between encoder/crtc. However in this case we need
148 * to track crtc in the disable() hook which is called
149 * _after_ encoder_mask is cleared.
153 * @debugfs_root: Debug file system root file node
154 * @enc_lock: Lock around physical encoder
155 * create/destroy/enable/disable
* @frame_busy_mask: Bitmask tracking which phys_encs are still busy
* processing the current command.
* Bit0 = phys_encs[0] etc.
159 * @crtc_frame_event_cb: callback handler for frame event
160 * @crtc_frame_event_cb_data: callback handler private data
161 * @frame_done_timeout_ms: frame done timeout in ms
162 * @frame_done_timer: watchdog timer for frame done event
163 * @vsync_event_timer: vsync timer
164 * @disp_info: local copy of msm_display_info struct
* @idle_pc_supported: indicate if idle power collapse is supported
166 * @rc_lock: resource control mutex lock to protect
167 * virt encoder over various state changes
168 * @rc_state: resource controller state
169 * @delayed_off_work: delayed worker to schedule disabling of
170 * clks and resources after IDLE_TIMEOUT time.
171 * @vsync_event_work: worker to handle vsync event for autorefresh
172 * @topology: topology of the display
173 * @idle_timeout: idle timeout duration in milliseconds
174 * @dp: msm_dp pointer, for DP encoders
175 */
176 struct dpu_encoder_virt {
177 struct drm_encoder base;
178 spinlock_t enc_spinlock;
179 uint32_t bus_scaling_client;
180
181 bool enabled;
182
183 unsigned int num_phys_encs;
184 struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
185 struct dpu_encoder_phys *cur_master;
186 struct dpu_encoder_phys *cur_slave;
187 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
188
189 bool intfs_swapped;
190
191 struct drm_crtc *crtc;
192
193 struct dentry *debugfs_root;
194 struct mutex enc_lock;
195 DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
196 void (*crtc_frame_event_cb)(void *, u32 event);
197 void *crtc_frame_event_cb_data;
198
199 atomic_t frame_done_timeout_ms;
200 struct timer_list frame_done_timer;
201 struct timer_list vsync_event_timer;
202
203 struct msm_display_info disp_info;
204
205 bool idle_pc_supported;
206 struct mutex rc_lock;
207 enum dpu_enc_rc_states rc_state;
208 struct delayed_work delayed_off_work;
209 struct kthread_work vsync_event_work;
210 struct msm_display_topology topology;
211
212 u32 idle_timeout;
213
214 struct msm_dp *dp;
215 };
216
217 #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
218
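/* 4x4 dither matrix (values 0..15) programmed into the PINGPONG dither block */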
219 static u32 dither_matrix[DITHER_MATRIX_SZ] = {
220 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
221 };
222
static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
224 {
225 struct dpu_hw_dither_cfg dither_cfg = { 0 };
226
227 if (!hw_pp->ops.setup_dither)
228 return;
229
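/* enable dithering only for 6 bpc panels; other depths disable the dither block via a NULL config */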
230 switch (bpc) {
231 case 6:
232 dither_cfg.c0_bitdepth = 6;
233 dither_cfg.c1_bitdepth = 6;
234 dither_cfg.c2_bitdepth = 6;
235 dither_cfg.c3_bitdepth = 6;
236 dither_cfg.temporal_en = 0;
237 break;
238 default:
239 hw_pp->ops.setup_dither(hw_pp, NULL);
240 return;
241 }
242
243 memcpy(&dither_cfg.matrix, dither_matrix,
244 sizeof(u32) * DITHER_MATRIX_SZ);
245
246 hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
247 }
248
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx)
251 {
252 DRM_ERROR("irq timeout id=%u, intf=%d, pp=%d, intr=%d\n",
253 DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
254 phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
255
256 if (phys_enc->parent_ops->handle_frame_done)
257 phys_enc->parent_ops->handle_frame_done(
258 phys_enc->parent, phys_enc,
259 DPU_ENCODER_FRAME_EVENT_ERROR);
260 }
261
262 static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
263 u32 irq_idx, struct dpu_encoder_wait_info *info);
264
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx,
struct dpu_encoder_wait_info *wait_info)
268 {
269 struct dpu_encoder_irq *irq;
270 u32 irq_status;
271 int ret;
272
273 if (!wait_info || intr_idx >= INTR_IDX_MAX) {
274 DPU_ERROR("invalid params\n");
275 return -EINVAL;
276 }
277 irq = &phys_enc->irq[intr_idx];
278
279 /* note: do master / slave checking outside */
280
281 /* return EWOULDBLOCK since we know the wait isn't necessary */
282 if (phys_enc->enable_state == DPU_ENC_DISABLED) {
283 DRM_ERROR("encoder is disabled id=%u, intr=%d, irq=%d\n",
284 DRMID(phys_enc->parent), intr_idx,
285 irq->irq_idx);
286 return -EWOULDBLOCK;
287 }
288
289 if (irq->irq_idx < 0) {
290 DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, irq=%s\n",
291 DRMID(phys_enc->parent), intr_idx,
292 irq->name);
293 return 0;
294 }
295
296 DRM_DEBUG_KMS("id=%u, intr=%d, irq=%d, pp=%d, pending_cnt=%d\n",
297 DRMID(phys_enc->parent), intr_idx,
298 irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
299 atomic_read(wait_info->atomic_cnt));
300
301 ret = dpu_encoder_helper_wait_event_timeout(
302 DRMID(phys_enc->parent),
303 irq->irq_idx,
304 wait_info);
305
306 if (ret <= 0) {
307 irq_status = dpu_core_irq_read(phys_enc->dpu_kms,
308 irq->irq_idx, true);
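/*
* the wait timed out, but the interrupt status is already set: the IRQ
* was presumably missed, so invoke the callback by hand and treat the
* wait as successful
*/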
309 if (irq_status) {
310 unsigned long flags;
311
312 DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n",
313 DRMID(phys_enc->parent), intr_idx,
314 irq->irq_idx,
315 phys_enc->hw_pp->idx - PINGPONG_0,
316 atomic_read(wait_info->atomic_cnt));
317 local_irq_save(flags);
318 irq->cb.func(phys_enc, irq->irq_idx);
319 local_irq_restore(flags);
320 ret = 0;
321 } else {
322 ret = -ETIMEDOUT;
323 DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n",
324 DRMID(phys_enc->parent), intr_idx,
325 irq->irq_idx,
326 phys_enc->hw_pp->idx - PINGPONG_0,
327 atomic_read(wait_info->atomic_cnt));
328 }
329 } else {
330 ret = 0;
331 trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
332 intr_idx, irq->irq_idx,
333 phys_enc->hw_pp->idx - PINGPONG_0,
334 atomic_read(wait_info->atomic_cnt));
335 }
336
337 return ret;
338 }
339
int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx)
342 {
343 struct dpu_encoder_irq *irq;
344 int ret = 0;
345
346 if (intr_idx >= INTR_IDX_MAX) {
347 DPU_ERROR("invalid params\n");
348 return -EINVAL;
349 }
350 irq = &phys_enc->irq[intr_idx];
351
352 if (irq->irq_idx < 0) {
353 DPU_ERROR_PHYS(phys_enc,
354 "invalid IRQ index:%d\n", irq->irq_idx);
355 return -EINVAL;
356 }
357
358 ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, irq->irq_idx,
359 &irq->cb);
360 if (ret) {
361 DPU_ERROR_PHYS(phys_enc,
362 "failed to register IRQ callback for %s\n",
363 irq->name);
364 irq->irq_idx = -EINVAL;
365 return ret;
366 }
367
368 trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
369 irq->irq_idx);
370
371 return ret;
372 }
373
int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx)
376 {
377 struct dpu_encoder_irq *irq;
378 int ret;
379
380 irq = &phys_enc->irq[intr_idx];
381
382 /* silently skip irqs that weren't registered */
383 if (irq->irq_idx < 0) {
384 DRM_ERROR("duplicate unregister id=%u, intr=%d, irq=%d",
385 DRMID(phys_enc->parent), intr_idx,
386 irq->irq_idx);
387 return 0;
388 }
389
390 ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
391 &irq->cb);
392 if (ret) {
393 DRM_ERROR("unreg cb fail id=%u, intr=%d, irq=%d ret=%d",
394 DRMID(phys_enc->parent), intr_idx,
395 irq->irq_idx, ret);
396 }
397
398 trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
399 irq->irq_idx);
400
401 return 0;
402 }
403
int dpu_encoder_get_frame_count(struct drm_encoder *drm_enc)
405 {
406 struct dpu_encoder_virt *dpu_enc;
407 struct dpu_encoder_phys *phys;
408 int framecount = 0;
409
410 dpu_enc = to_dpu_encoder_virt(drm_enc);
411 phys = dpu_enc ? dpu_enc->cur_master : NULL;
412
413 if (phys && phys->ops.get_frame_count)
414 framecount = phys->ops.get_frame_count(phys);
415
416 return framecount;
417 }
418
int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
420 {
421 struct dpu_encoder_virt *dpu_enc;
422 struct dpu_encoder_phys *phys;
423 int linecount = 0;
424
425 dpu_enc = to_dpu_encoder_virt(drm_enc);
426 phys = dpu_enc ? dpu_enc->cur_master : NULL;
427
428 if (phys && phys->ops.get_line_count)
429 linecount = phys->ops.get_line_count(phys);
430
431 return linecount;
432 }
433
void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
struct dpu_encoder_hw_resources *hw_res)
436 {
437 struct dpu_encoder_virt *dpu_enc = NULL;
438 int i = 0;
439
440 dpu_enc = to_dpu_encoder_virt(drm_enc);
441 DPU_DEBUG_ENC(dpu_enc, "\n");
442
443 /* Query resources used by phys encs, expected to be without overlap */
444 memset(hw_res, 0, sizeof(*hw_res));
445
446 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
447 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
448
449 if (phys->ops.get_hw_resources)
450 phys->ops.get_hw_resources(phys, hw_res);
451 }
452 }
453
static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
455 {
456 struct dpu_encoder_virt *dpu_enc = NULL;
457 int i = 0;
458
459 if (!drm_enc) {
460 DPU_ERROR("invalid encoder\n");
461 return;
462 }
463
464 dpu_enc = to_dpu_encoder_virt(drm_enc);
465 DPU_DEBUG_ENC(dpu_enc, "\n");
466
467 mutex_lock(&dpu_enc->enc_lock);
468
469 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
470 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
471
472 if (phys->ops.destroy) {
473 phys->ops.destroy(phys);
474 --dpu_enc->num_phys_encs;
475 dpu_enc->phys_encs[i] = NULL;
476 }
477 }
478
479 if (dpu_enc->num_phys_encs)
480 DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
481 dpu_enc->num_phys_encs);
482 dpu_enc->num_phys_encs = 0;
483 mutex_unlock(&dpu_enc->enc_lock);
484
485 drm_encoder_cleanup(drm_enc);
486 mutex_destroy(&dpu_enc->enc_lock);
487 }
488
void dpu_encoder_helper_split_config(
struct dpu_encoder_phys *phys_enc,
enum dpu_intf interface)
492 {
493 struct dpu_encoder_virt *dpu_enc;
494 struct split_pipe_cfg cfg = { 0 };
495 struct dpu_hw_mdp *hw_mdptop;
496 struct msm_display_info *disp_info;
497
498 if (!phys_enc->hw_mdptop || !phys_enc->parent) {
499 DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
500 return;
501 }
502
503 dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
504 hw_mdptop = phys_enc->hw_mdptop;
505 disp_info = &dpu_enc->disp_info;
506
507 if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
508 return;
509
/*
* disable split modes since encoder will be operating as the only
* encoder, either for the entire use case in the case of, for example,
* single DSI, or for this frame in the case of left/right only partial
* update.
*/
516 if (phys_enc->split_role == ENC_ROLE_SOLO) {
517 if (hw_mdptop->ops.setup_split_pipe)
518 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
519 return;
520 }
521
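/* dual-pipe case: build the split-pipe config; only the master programs the MDP top */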
522 cfg.en = true;
523 cfg.mode = phys_enc->intf_mode;
524 cfg.intf = interface;
525
526 if (cfg.en && phys_enc->ops.needs_single_flush &&
527 phys_enc->ops.needs_single_flush(phys_enc))
528 cfg.split_flush_en = true;
529
530 if (phys_enc->split_role == ENC_ROLE_MASTER) {
531 DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
532
533 if (hw_mdptop->ops.setup_split_pipe)
534 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
535 }
536 }
537
static struct msm_display_topology dpu_encoder_get_topology(
struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
struct drm_display_mode *mode)
542 {
543 struct msm_display_topology topology = {0};
544 int i, intf_count = 0;
545
546 for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
547 if (dpu_enc->phys_encs[i])
548 intf_count++;
549
550 /* Datapath topology selection
551 *
552 * Dual display
553 * 2 LM, 2 INTF ( Split display using 2 interfaces)
554 *
555 * Single display
556 * 1 LM, 1 INTF
557 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
558 *
559 * Adding color blocks only to primary interface if available in
560 * sufficient number
561 */
562 if (intf_count == 2)
563 topology.num_lm = 2;
564 else if (!dpu_kms->catalog->caps->has_3d_merge)
565 topology.num_lm = 1;
566 else
567 topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
568
569 if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
570 if (dpu_kms->catalog->dspp &&
571 (dpu_kms->catalog->dspp_count >= topology.num_lm))
572 topology.num_dspp = topology.num_lm;
573 }
574
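/* no separate compression (DSC) encoder blocks are requested for these topologies */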
575 topology.num_enc = 0;
576 topology.num_intf = intf_count;
577
578 return topology;
579 }
static int dpu_encoder_virt_atomic_check(
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
584 {
585 struct dpu_encoder_virt *dpu_enc;
586 struct msm_drm_private *priv;
587 struct dpu_kms *dpu_kms;
588 const struct drm_display_mode *mode;
589 struct drm_display_mode *adj_mode;
590 struct msm_display_topology topology;
591 struct dpu_global_state *global_state;
592 int i = 0;
593 int ret = 0;
594
595 if (!drm_enc || !crtc_state || !conn_state) {
596 DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
597 drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
598 return -EINVAL;
599 }
600
601 dpu_enc = to_dpu_encoder_virt(drm_enc);
602 DPU_DEBUG_ENC(dpu_enc, "\n");
603
604 priv = drm_enc->dev->dev_private;
605 dpu_kms = to_dpu_kms(priv->kms);
606 mode = &crtc_state->mode;
607 adj_mode = &crtc_state->adjusted_mode;
608 global_state = dpu_kms_get_global_state(crtc_state->state);
609 if (IS_ERR(global_state))
610 return PTR_ERR(global_state);
611
612 trace_dpu_enc_atomic_check(DRMID(drm_enc));
613
614 /* perform atomic check on the first physical encoder (master) */
615 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
616 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
617
618 if (phys->ops.atomic_check)
619 ret = phys->ops.atomic_check(phys, crtc_state,
620 conn_state);
621 else if (phys->ops.mode_fixup)
622 if (!phys->ops.mode_fixup(phys, mode, adj_mode))
623 ret = -EINVAL;
624
625 if (ret) {
626 DPU_ERROR_ENC(dpu_enc,
627 "mode unsupported, phys idx %d\n", i);
628 break;
629 }
630 }
631
632 topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
633
634 /* Reserve dynamic resources now. */
635 if (!ret) {
/*
* Release and allocate resources on every modeset.
* Don't allocate when active is false.
*/
640 if (drm_atomic_crtc_needs_modeset(crtc_state)) {
641 dpu_rm_release(global_state, drm_enc);
642
643 if (!crtc_state->active_changed || crtc_state->enable)
644 ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
645 drm_enc, crtc_state, topology);
646 }
647 }
648
649 trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
650
651 return ret;
652 }
653
static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
struct msm_display_info *disp_info)
656 {
657 struct dpu_vsync_source_cfg vsync_cfg = { 0 };
658 struct msm_drm_private *priv;
659 struct dpu_kms *dpu_kms;
660 struct dpu_hw_mdp *hw_mdptop;
661 struct drm_encoder *drm_enc;
662 int i;
663
664 if (!dpu_enc || !disp_info) {
665 DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
666 dpu_enc != NULL, disp_info != NULL);
667 return;
668 } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
669 DPU_ERROR("invalid num phys enc %d/%d\n",
670 dpu_enc->num_phys_encs,
671 (int) ARRAY_SIZE(dpu_enc->hw_pp));
672 return;
673 }
674
675 drm_enc = &dpu_enc->base;
/* these pointers are checked in virt_enable_helper */
677 priv = drm_enc->dev->dev_private;
678
679 dpu_kms = to_dpu_kms(priv->kms);
680 hw_mdptop = dpu_kms->hw_mdp;
681 if (!hw_mdptop) {
682 DPU_ERROR("invalid mdptop\n");
683 return;
684 }
685
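/*
* for command-mode panels, point every pingpong at the chosen TE/vsync
* source: either the panel TE GPIO or a watchdog timer
*/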
686 if (hw_mdptop->ops.setup_vsync_source &&
687 disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
688 for (i = 0; i < dpu_enc->num_phys_encs; i++)
689 vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
690
691 vsync_cfg.pp_count = dpu_enc->num_phys_encs;
692 if (disp_info->is_te_using_watchdog_timer)
693 vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
694 else
695 vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
696
697 hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
698 }
699 }
700
static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
702 {
703 struct dpu_encoder_virt *dpu_enc;
704 int i;
705
706 if (!drm_enc) {
707 DPU_ERROR("invalid encoder\n");
708 return;
709 }
710
711 dpu_enc = to_dpu_encoder_virt(drm_enc);
712
713 DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
714 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
715 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
716
717 if (phys->ops.irq_control)
718 phys->ops.irq_control(phys, enable);
719 }
720
721 }
722
static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
bool enable)
725 {
726 struct msm_drm_private *priv;
727 struct dpu_kms *dpu_kms;
728 struct dpu_encoder_virt *dpu_enc;
729
730 dpu_enc = to_dpu_encoder_virt(drm_enc);
731 priv = drm_enc->dev->dev_private;
732 dpu_kms = to_dpu_kms(priv->kms);
733
734 trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
735
736 if (!dpu_enc->cur_master) {
737 DPU_ERROR("encoder master not set\n");
738 return;
739 }
740
741 if (enable) {
742 /* enable DPU core clks */
743 pm_runtime_get_sync(&dpu_kms->pdev->dev);
744
745 /* enable all the irq */
746 _dpu_encoder_irq_control(drm_enc, true);
747
748 } else {
749 /* disable all the irq */
750 _dpu_encoder_irq_control(drm_enc, false);
751
752 /* disable DPU core clks */
753 pm_runtime_put_sync(&dpu_kms->pdev->dev);
754 }
755
756 }
757
static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
u32 sw_event)
760 {
761 struct dpu_encoder_virt *dpu_enc;
762 struct msm_drm_private *priv;
763 bool is_vid_mode = false;
764
765 if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
766 DPU_ERROR("invalid parameters\n");
767 return -EINVAL;
768 }
769 dpu_enc = to_dpu_encoder_virt(drm_enc);
770 priv = drm_enc->dev->dev_private;
771 is_vid_mode = dpu_enc->disp_info.capabilities &
772 MSM_DISPLAY_CAP_VID_MODE;
773
/*
* when idle_pc is not supported, process only KICKOFF, PRE_STOP and STOP
* events and return early for other events (ie wb display).
*/
778 if (!dpu_enc->idle_pc_supported &&
779 (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
780 sw_event != DPU_ENC_RC_EVENT_STOP &&
781 sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
782 return 0;
783
784 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
785 dpu_enc->rc_state, "begin");
786
787 switch (sw_event) {
788 case DPU_ENC_RC_EVENT_KICKOFF:
789 /* cancel delayed off work, if any */
790 if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
791 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
792 sw_event);
793
794 mutex_lock(&dpu_enc->rc_lock);
795
796 /* return if the resource control is already in ON state */
797 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
798 DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in ON state\n",
799 DRMID(drm_enc), sw_event);
800 mutex_unlock(&dpu_enc->rc_lock);
801 return 0;
802 } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
803 dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
804 DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in state %d\n",
805 DRMID(drm_enc), sw_event,
806 dpu_enc->rc_state);
807 mutex_unlock(&dpu_enc->rc_lock);
808 return -EINVAL;
809 }
810
811 if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
812 _dpu_encoder_irq_control(drm_enc, true);
813 else
814 _dpu_encoder_resource_control_helper(drm_enc, true);
815
816 dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
817
818 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
819 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
820 "kickoff");
821
822 mutex_unlock(&dpu_enc->rc_lock);
823 break;
824
825 case DPU_ENC_RC_EVENT_FRAME_DONE:
/*
* the rc_lock mutex is not taken here because this event arrives from
* interrupt context; locking is not required since the other events,
* such as KICKOFF and STOP, do a wait-for-idle before executing
* the resource_control
*/
832 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
833 DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
834 DRMID(drm_enc), sw_event,
835 dpu_enc->rc_state);
836 return -EINVAL;
837 }
838
839 /*
840 * schedule off work item only when there are no
841 * frames pending
842 */
843 if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
844 DRM_DEBUG_KMS("id:%d skip schedule work\n",
845 DRMID(drm_enc));
846 return 0;
847 }
848
849 queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
850 msecs_to_jiffies(dpu_enc->idle_timeout));
851
852 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
853 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
854 "frame done");
855 break;
856
857 case DPU_ENC_RC_EVENT_PRE_STOP:
858 /* cancel delayed off work, if any */
859 if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
860 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
861 sw_event);
862
863 mutex_lock(&dpu_enc->rc_lock);
864
865 if (is_vid_mode &&
866 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
867 _dpu_encoder_irq_control(drm_enc, true);
868 }
869 /* skip if is already OFF or IDLE, resources are off already */
870 else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
871 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
872 DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
873 DRMID(drm_enc), sw_event,
874 dpu_enc->rc_state);
875 mutex_unlock(&dpu_enc->rc_lock);
876 return 0;
877 }
878
879 dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
880
881 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
882 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
883 "pre stop");
884
885 mutex_unlock(&dpu_enc->rc_lock);
886 break;
887
888 case DPU_ENC_RC_EVENT_STOP:
889 mutex_lock(&dpu_enc->rc_lock);
890
891 /* return if the resource control is already in OFF state */
892 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
893 DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
894 DRMID(drm_enc), sw_event);
895 mutex_unlock(&dpu_enc->rc_lock);
896 return 0;
897 } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
898 DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
899 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
900 mutex_unlock(&dpu_enc->rc_lock);
901 return -EINVAL;
902 }
903
/*
* expect to arrive here only in the PRE_OFF or IDLE state;
* in the IDLE state the resources are already disabled
*/
908 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
909 _dpu_encoder_resource_control_helper(drm_enc, false);
910
911 dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
912
913 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
914 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
915 "stop");
916
917 mutex_unlock(&dpu_enc->rc_lock);
918 break;
919
920 case DPU_ENC_RC_EVENT_ENTER_IDLE:
921 mutex_lock(&dpu_enc->rc_lock);
922
923 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
924 DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
925 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
926 mutex_unlock(&dpu_enc->rc_lock);
927 return 0;
928 }
929
930 /*
931 * if we are in ON but a frame was just kicked off,
932 * ignore the IDLE event, it's probably a stale timer event
933 */
934 if (dpu_enc->frame_busy_mask[0]) {
935 DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
936 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
937 mutex_unlock(&dpu_enc->rc_lock);
938 return 0;
939 }
940
941 if (is_vid_mode)
942 _dpu_encoder_irq_control(drm_enc, false);
943 else
944 _dpu_encoder_resource_control_helper(drm_enc, false);
945
946 dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
947
948 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
949 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
950 "idle");
951
952 mutex_unlock(&dpu_enc->rc_lock);
953 break;
954
955 default:
956 DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
957 sw_event);
958 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
959 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
960 "error");
961 break;
962 }
963
964 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
965 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
966 "end");
967 return 0;
968 }
969
static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
973 {
974 struct dpu_encoder_virt *dpu_enc;
975 struct msm_drm_private *priv;
976 struct dpu_kms *dpu_kms;
977 struct list_head *connector_list;
978 struct drm_connector *conn = NULL, *conn_iter;
979 struct drm_crtc *drm_crtc;
980 struct dpu_crtc_state *cstate;
981 struct dpu_global_state *global_state;
982 struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
983 struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
984 struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
985 struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
986 int num_lm, num_ctl, num_pp;
987 int i, j;
988
989 if (!drm_enc) {
990 DPU_ERROR("invalid encoder\n");
991 return;
992 }
993
994 dpu_enc = to_dpu_encoder_virt(drm_enc);
995 DPU_DEBUG_ENC(dpu_enc, "\n");
996
997 priv = drm_enc->dev->dev_private;
998 dpu_kms = to_dpu_kms(priv->kms);
999 connector_list = &dpu_kms->dev->mode_config.connector_list;
1000
1001 global_state = dpu_kms_get_existing_global_state(dpu_kms);
1002 if (IS_ERR_OR_NULL(global_state)) {
1003 DPU_ERROR("Failed to get global state");
1004 return;
1005 }
1006
1007 trace_dpu_enc_mode_set(DRMID(drm_enc));
1008
1009 if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS)
1010 msm_dp_display_mode_set(dpu_enc->dp, drm_enc, mode, adj_mode);
1011
1012 list_for_each_entry(conn_iter, connector_list, head)
1013 if (conn_iter->encoder == drm_enc)
1014 conn = conn_iter;
1015
1016 if (!conn) {
1017 DPU_ERROR_ENC(dpu_enc, "failed to find attached connector\n");
1018 return;
1019 } else if (!conn->state) {
1020 DPU_ERROR_ENC(dpu_enc, "invalid connector state\n");
1021 return;
1022 }
1023
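/* find the CRTC currently driving this encoder via its state's encoder_mask */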
1024 drm_for_each_crtc(drm_crtc, drm_enc->dev)
1025 if (drm_crtc->state->encoder_mask & drm_encoder_mask(drm_enc))
1026 break;
1027
/* Query resources that have been reserved in the atomic check step. */
1029 num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1030 drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
1031 ARRAY_SIZE(hw_pp));
1032 num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1033 drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
1034 num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1035 drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
1036 dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1037 drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
1038 ARRAY_SIZE(hw_dspp));
1039
1040 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
1041 dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
1042 : NULL;
1043
1044 cstate = to_dpu_crtc_state(drm_crtc->state);
1045
1046 for (i = 0; i < num_lm; i++) {
1047 int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
1048
1049 cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
1050 cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
1051 cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
1052 }
1053
1054 cstate->num_mixers = num_lm;
1055
1056 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1057 int num_blk;
1058 struct dpu_hw_blk *hw_blk[MAX_CHANNELS_PER_ENC];
1059 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1060
1061 if (!dpu_enc->hw_pp[i]) {
1062 DPU_ERROR_ENC(dpu_enc,
1063 "no pp block assigned at idx: %d\n", i);
1064 return;
1065 }
1066
1067 if (!hw_ctl[i]) {
1068 DPU_ERROR_ENC(dpu_enc,
1069 "no ctl block assigned at idx: %d\n", i);
1070 return;
1071 }
1072
1073 phys->hw_pp = dpu_enc->hw_pp[i];
1074 phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
1075
1076 num_blk = dpu_rm_get_assigned_resources(&dpu_kms->rm,
1077 global_state, drm_enc->base.id, DPU_HW_BLK_INTF,
1078 hw_blk, ARRAY_SIZE(hw_blk));
1079 for (j = 0; j < num_blk; j++) {
1080 struct dpu_hw_intf *hw_intf;
1081
hw_intf = to_dpu_hw_intf(hw_blk[j]);
1083 if (hw_intf->idx == phys->intf_idx)
1084 phys->hw_intf = hw_intf;
1085 }
1086
1087 if (!phys->hw_intf) {
1088 DPU_ERROR_ENC(dpu_enc,
1089 "no intf block assigned at idx: %d\n", i);
1090 return;
1091 }
1092
1093 phys->connector = conn->state->connector;
1094 if (phys->ops.mode_set)
1095 phys->ops.mode_set(phys, mode, adj_mode);
1096 }
1097 }
1098
static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
1100 {
1101 struct dpu_encoder_virt *dpu_enc = NULL;
1102 int i;
1103
1104 if (!drm_enc || !drm_enc->dev) {
1105 DPU_ERROR("invalid parameters\n");
1106 return;
1107 }
1108
1109 dpu_enc = to_dpu_encoder_virt(drm_enc);
1110 if (!dpu_enc || !dpu_enc->cur_master) {
1111 DPU_ERROR("invalid dpu encoder/master\n");
1112 return;
1113 }
1114
1115
1116 if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_TMDS &&
1117 dpu_enc->cur_master->hw_mdptop &&
1118 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
1119 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
1120 dpu_enc->cur_master->hw_mdptop);
1121
1122 _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
1123
1124 if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
1125 !WARN_ON(dpu_enc->num_phys_encs == 0)) {
1126 unsigned bpc = dpu_enc->phys_encs[0]->connector->display_info.bpc;
1127 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
1128 if (!dpu_enc->hw_pp[i])
1129 continue;
1130 _dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
1131 }
1132 }
1133 }
1134
void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
1136 {
1137 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1138
1139 mutex_lock(&dpu_enc->enc_lock);
1140
1141 if (!dpu_enc->enabled)
1142 goto out;
1143
1144 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
1145 dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
1146 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
1147 dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
1148
1149 _dpu_encoder_virt_enable_helper(drm_enc);
1150
1151 out:
1152 mutex_unlock(&dpu_enc->enc_lock);
1153 }
1154
static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
1156 {
1157 struct dpu_encoder_virt *dpu_enc = NULL;
1158 int ret = 0;
1159 struct msm_drm_private *priv;
1160 struct drm_display_mode *cur_mode = NULL;
1161
1162 if (!drm_enc) {
1163 DPU_ERROR("invalid encoder\n");
1164 return;
1165 }
1166 dpu_enc = to_dpu_encoder_virt(drm_enc);
1167
1168 mutex_lock(&dpu_enc->enc_lock);
1169 cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
1170 priv = drm_enc->dev->dev_private;
1171
1172 trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
1173 cur_mode->vdisplay);
1174
1175 /* always enable slave encoder before master */
1176 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
1177 dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
1178
1179 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
1180 dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
1181
1182 ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
1183 if (ret) {
1184 DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
1185 ret);
1186 goto out;
1187 }
1188
1189 _dpu_encoder_virt_enable_helper(drm_enc);
1190
1191 if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
1192 ret = msm_dp_display_enable(dpu_enc->dp, drm_enc);
1193 if (ret) {
1194 DPU_ERROR_ENC(dpu_enc, "dp display enable failed: %d\n",
1195 ret);
1196 goto out;
1197 }
1198 }
1199 dpu_enc->enabled = true;
1200
1201 out:
1202 mutex_unlock(&dpu_enc->enc_lock);
1203 }
1204
static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
1206 {
1207 struct dpu_encoder_virt *dpu_enc = NULL;
1208 struct msm_drm_private *priv;
1209 int i = 0;
1210
1211 if (!drm_enc) {
1212 DPU_ERROR("invalid encoder\n");
1213 return;
1214 } else if (!drm_enc->dev) {
1215 DPU_ERROR("invalid dev\n");
1216 return;
1217 }
1218
1219 dpu_enc = to_dpu_encoder_virt(drm_enc);
1220 DPU_DEBUG_ENC(dpu_enc, "\n");
1221
1222 mutex_lock(&dpu_enc->enc_lock);
1223 dpu_enc->enabled = false;
1224
1225 priv = drm_enc->dev->dev_private;
1226
1227 trace_dpu_enc_disable(DRMID(drm_enc));
1228
1229 /* wait for idle */
1230 dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
1231
1232 if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
1233 if (msm_dp_display_pre_disable(dpu_enc->dp, drm_enc))
1234 DPU_ERROR_ENC(dpu_enc, "dp display push idle failed\n");
1235 }
1236
1237 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
1238
1239 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1240 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1241
1242 if (phys->ops.disable)
1243 phys->ops.disable(phys);
1244 }
1245
1246
1247 /* after phys waits for frame-done, should be no more frames pending */
1248 if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
1249 DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
1250 del_timer_sync(&dpu_enc->frame_done_timer);
1251 }
1252
1253 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
1254
1255 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1256 dpu_enc->phys_encs[i]->connector = NULL;
1257 }
1258
1259 DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
1260
1261 if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
1262 if (msm_dp_display_disable(dpu_enc->dp, drm_enc))
1263 DPU_ERROR_ENC(dpu_enc, "dp display disable failed\n");
1264 }
1265
1266 mutex_unlock(&dpu_enc->enc_lock);
1267 }
1268
static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
enum dpu_intf_type type, u32 controller_id)
1271 {
1272 int i = 0;
1273
1274 for (i = 0; i < catalog->intf_count; i++) {
1275 if (catalog->intf[i].type == type
1276 && catalog->intf[i].controller_id == controller_id) {
1277 return catalog->intf[i].id;
1278 }
1279 }
1280
1281 return INTF_MAX;
1282 }
1283
static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phy_enc)
1286 {
1287 struct dpu_encoder_virt *dpu_enc = NULL;
1288 unsigned long lock_flags;
1289
1290 if (!drm_enc || !phy_enc)
1291 return;
1292
1293 DPU_ATRACE_BEGIN("encoder_vblank_callback");
1294 dpu_enc = to_dpu_encoder_virt(drm_enc);
1295
1296 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1297 if (dpu_enc->crtc)
1298 dpu_crtc_vblank_callback(dpu_enc->crtc);
1299 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1300
1301 atomic_inc(&phy_enc->vsync_cnt);
1302 DPU_ATRACE_END("encoder_vblank_callback");
1303 }
1304
static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phy_enc)
1307 {
1308 if (!phy_enc)
1309 return;
1310
1311 DPU_ATRACE_BEGIN("encoder_underrun_callback");
1312 atomic_inc(&phy_enc->underrun_cnt);
1313
1314 /* trigger dump only on the first underrun */
1315 if (atomic_read(&phy_enc->underrun_cnt) == 1)
1316 msm_disp_snapshot_state(drm_enc->dev);
1317
1318 trace_dpu_enc_underrun_cb(DRMID(drm_enc),
1319 atomic_read(&phy_enc->underrun_cnt));
1320 DPU_ATRACE_END("encoder_underrun_callback");
1321 }
1322
void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
1324 {
1325 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1326 unsigned long lock_flags;
1327
1328 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1329 /* crtc should always be cleared before re-assigning */
1330 WARN_ON(crtc && dpu_enc->crtc);
1331 dpu_enc->crtc = crtc;
1332 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1333 }
1334
void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
struct drm_crtc *crtc, bool enable)
1337 {
1338 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1339 unsigned long lock_flags;
1340 int i;
1341
1342 trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
1343
1344 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1345 if (dpu_enc->crtc != crtc) {
1346 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1347 return;
1348 }
1349 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1350
1351 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1352 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1353
1354 if (phys->ops.control_vblank_irq)
1355 phys->ops.control_vblank_irq(phys, enable);
1356 }
1357 }
1358
void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
void (*frame_event_cb)(void *, u32 event),
void *frame_event_cb_data)
1362 {
1363 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1364 unsigned long lock_flags;
1365 bool enable;
1366
1367 enable = frame_event_cb ? true : false;
1368
1369 if (!drm_enc) {
1370 DPU_ERROR("invalid encoder\n");
1371 return;
1372 }
1373 trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
1374
1375 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1376 dpu_enc->crtc_frame_event_cb = frame_event_cb;
1377 dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
1378 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1379 }
1380
static void dpu_encoder_frame_done_callback(
struct drm_encoder *drm_enc,
struct dpu_encoder_phys *ready_phys, u32 event)
1384 {
1385 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1386 unsigned int i;
1387
1388 if (event & (DPU_ENCODER_FRAME_EVENT_DONE
1389 | DPU_ENCODER_FRAME_EVENT_ERROR
1390 | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
1391
1392 if (!dpu_enc->frame_busy_mask[0]) {
1393 /**
1394 * suppress frame_done without waiter,
1395 * likely autorefresh
1396 */
1397 trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc),
1398 event, ready_phys->intf_idx);
1399 return;
1400 }
1401
1402 /* One of the physical encoders has become idle */
1403 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1404 if (dpu_enc->phys_encs[i] == ready_phys) {
1405 trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
1406 dpu_enc->frame_busy_mask[0]);
1407 clear_bit(i, dpu_enc->frame_busy_mask);
1408 }
1409 }
1410
1411 if (!dpu_enc->frame_busy_mask[0]) {
1412 atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
1413 del_timer(&dpu_enc->frame_done_timer);
1414
1415 dpu_encoder_resource_control(drm_enc,
1416 DPU_ENC_RC_EVENT_FRAME_DONE);
1417
1418 if (dpu_enc->crtc_frame_event_cb)
1419 dpu_enc->crtc_frame_event_cb(
1420 dpu_enc->crtc_frame_event_cb_data,
1421 event);
1422 }
1423 } else {
1424 if (dpu_enc->crtc_frame_event_cb)
1425 dpu_enc->crtc_frame_event_cb(
1426 dpu_enc->crtc_frame_event_cb_data, event);
1427 }
1428 }
1429
static void dpu_encoder_off_work(struct work_struct *work)
1431 {
1432 struct dpu_encoder_virt *dpu_enc = container_of(work,
1433 struct dpu_encoder_virt, delayed_off_work.work);
1434
1435 dpu_encoder_resource_control(&dpu_enc->base,
1436 DPU_ENC_RC_EVENT_ENTER_IDLE);
1437
1438 dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
1439 DPU_ENCODER_FRAME_EVENT_IDLE);
1440 }
1441
1442 /**
1443 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
1444 * @drm_enc: Pointer to drm encoder structure
1445 * @phys: Pointer to physical encoder structure
1446 * @extra_flush_bits: Additional bit mask to include in flush trigger
1447 */
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
1450 {
1451 struct dpu_hw_ctl *ctl;
1452 int pending_kickoff_cnt;
1453 u32 ret = UINT_MAX;
1454
1455 if (!phys->hw_pp) {
1456 DPU_ERROR("invalid pingpong hw\n");
1457 return;
1458 }
1459
1460 ctl = phys->hw_ctl;
1461 if (!ctl->ops.trigger_flush) {
1462 DPU_ERROR("missing trigger cb\n");
1463 return;
1464 }
1465
1466 pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
1467
1468 if (extra_flush_bits && ctl->ops.update_pending_flush)
1469 ctl->ops.update_pending_flush(ctl, extra_flush_bits);
1470
1471 ctl->ops.trigger_flush(ctl);
1472
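/* read back the pending flush mask only for the trace below */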
1473 if (ctl->ops.get_pending_flush)
1474 ret = ctl->ops.get_pending_flush(ctl);
1475
1476 trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
1477 pending_kickoff_cnt, ctl->idx,
1478 extra_flush_bits, ret);
1479 }
1480
1481 /**
1482 * _dpu_encoder_trigger_start - trigger start for a physical encoder
1483 * @phys: Pointer to physical encoder structure
1484 */
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
1486 {
1487 if (!phys) {
1488 DPU_ERROR("invalid argument(s)\n");
1489 return;
1490 }
1491
1492 if (!phys->hw_pp) {
1493 DPU_ERROR("invalid pingpong hw\n");
1494 return;
1495 }
1496
1497 if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
1498 phys->ops.trigger_start(phys);
1499 }
1500
void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
1502 {
1503 struct dpu_hw_ctl *ctl;
1504
1505 ctl = phys_enc->hw_ctl;
1506 if (ctl->ops.trigger_start) {
1507 ctl->ops.trigger_start(ctl);
1508 trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
1509 }
1510 }
1511
static int dpu_encoder_helper_wait_event_timeout(
int32_t drm_id,
u32 irq_idx,
struct dpu_encoder_wait_info *info)
1516 {
1517 int rc = 0;
1518 s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
1519 s64 jiffies = msecs_to_jiffies(info->timeout_ms);
1520 s64 time;
1521
1522 do {
1523 rc = wait_event_timeout(*(info->wq),
1524 atomic_read(info->atomic_cnt) == 0, jiffies);
1525 time = ktime_to_ms(ktime_get());
1526
1527 trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
1528 expected_time,
1529 atomic_read(info->atomic_cnt));
/* if we timed out (rc == 0) but the counter is still non-zero and the deadline has not passed, wait again */
1531 } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
1532 (time < expected_time));
1533
1534 return rc;
1535 }
1536
static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
1538 {
1539 struct dpu_encoder_virt *dpu_enc;
1540 struct dpu_hw_ctl *ctl;
1541 int rc;
1542 struct drm_encoder *drm_enc;
1543
1544 dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
1545 ctl = phys_enc->hw_ctl;
1546 drm_enc = phys_enc->parent;
1547
1548 if (!ctl->ops.reset)
1549 return;
1550
1551 DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
1552 ctl->idx);
1553
1554 rc = ctl->ops.reset(ctl);
1555 if (rc) {
1556 DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
1557 msm_disp_snapshot_state(drm_enc->dev);
1558 }
1559
1560 phys_enc->enable_state = DPU_ENC_ENABLED;
1561 }
1562
1563 /**
1564 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
1565 * Iterate through the physical encoders and perform consolidated flush
1566 * and/or control start triggering as needed. This is done in the virtual
1567 * encoder rather than the individual physical ones in order to handle
1568 * use cases that require visibility into multiple physical encoders at
1569 * a time.
1570 * @dpu_enc: Pointer to virtual encoder structure
1571 */
static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
1573 {
1574 struct dpu_hw_ctl *ctl;
1575 uint32_t i, pending_flush;
1576 unsigned long lock_flags;
1577
1578 pending_flush = 0x0;
1579
1580 /* update pending counts and trigger kickoff ctl flush atomically */
1581 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1582
1583 /* don't perform flush/start operations for slave encoders */
1584 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1585 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1586
1587 if (phys->enable_state == DPU_ENC_DISABLED)
1588 continue;
1589
1590 ctl = phys->hw_ctl;
1591
1592 /*
1593 * This is cleared in frame_done worker, which isn't invoked
1594 * for async commits. So don't set this for async, since it'll
1595 * roll over to the next commit.
1596 */
1597 if (phys->split_role != ENC_ROLE_SLAVE)
1598 set_bit(i, dpu_enc->frame_busy_mask);
1599
1600 if (!phys->ops.needs_single_flush ||
1601 !phys->ops.needs_single_flush(phys))
1602 _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
1603 else if (ctl->ops.get_pending_flush)
1604 pending_flush |= ctl->ops.get_pending_flush(ctl);
1605 }
1606
1607 /* for split flush, combine pending flush masks and send to master */
1608 if (pending_flush && dpu_enc->cur_master) {
1609 _dpu_encoder_trigger_flush(
1610 &dpu_enc->base,
1611 dpu_enc->cur_master,
1612 pending_flush);
1613 }
1614
1615 _dpu_encoder_trigger_start(dpu_enc->cur_master);
1616
1617 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1618 }
1619
void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
1621 {
1622 struct dpu_encoder_virt *dpu_enc;
1623 struct dpu_encoder_phys *phys;
1624 unsigned int i;
1625 struct dpu_hw_ctl *ctl;
1626 struct msm_display_info *disp_info;
1627
1628 if (!drm_enc) {
1629 DPU_ERROR("invalid encoder\n");
1630 return;
1631 }
1632 dpu_enc = to_dpu_encoder_virt(drm_enc);
1633 disp_info = &dpu_enc->disp_info;
1634
1635 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1636 phys = dpu_enc->phys_encs[i];
1637
1638 ctl = phys->hw_ctl;
1639 if (ctl->ops.clear_pending_flush)
1640 ctl->ops.clear_pending_flush(ctl);
1641
1642 /* update only for command mode primary ctl */
1643 if ((phys == dpu_enc->cur_master) &&
1644 (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
1645 && ctl->ops.trigger_pending)
1646 ctl->ops.trigger_pending(ctl);
1647 }
1648 }
1649
static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
struct drm_display_mode *mode)
1652 {
1653 u64 pclk_rate;
1654 u32 pclk_period;
1655 u32 line_time;
1656
1657 /*
1658 * For linetime calculation, only operate on master encoder.
1659 */
1660 if (!dpu_enc->cur_master)
1661 return 0;
1662
1663 if (!dpu_enc->cur_master->ops.get_line_count) {
1664 DPU_ERROR("get_line_count function not defined\n");
1665 return 0;
1666 }
1667
1668 pclk_rate = mode->clock; /* pixel clock in kHz */
1669 if (pclk_rate == 0) {
1670 DPU_ERROR("pclk is 0, cannot calculate line time\n");
1671 return 0;
1672 }
1673
1674 pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
1675 if (pclk_period == 0) {
1676 DPU_ERROR("pclk period is 0\n");
1677 return 0;
1678 }
1679
1680 /*
1681 * Line time calculation based on Pixel clock and HTOTAL.
1682 * Final unit is in ns.
1683 */
1684 line_time = (pclk_period * mode->htotal) / 1000;
1685 if (line_time == 0) {
1686 DPU_ERROR("line time calculation is 0\n");
1687 return 0;
1688 }
1689
1690 DPU_DEBUG_ENC(dpu_enc,
1691 "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
1692 pclk_rate, pclk_period, line_time);
1693
1694 return line_time;
1695 }
1696
int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
1698 {
1699 struct drm_display_mode *mode;
1700 struct dpu_encoder_virt *dpu_enc;
1701 u32 cur_line;
1702 u32 line_time;
1703 u32 vtotal, time_to_vsync;
1704 ktime_t cur_time;
1705
1706 dpu_enc = to_dpu_encoder_virt(drm_enc);
1707
1708 if (!drm_enc->crtc || !drm_enc->crtc->state) {
1709 DPU_ERROR("crtc/crtc state object is NULL\n");
1710 return -EINVAL;
1711 }
1712 mode = &drm_enc->crtc->state->adjusted_mode;
1713
1714 line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
1715 if (!line_time)
1716 return -EINVAL;
1717
1718 cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
1719
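/* lines left until vsync; if scanout is already past vtotal, assume a full frame */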
1720 vtotal = mode->vtotal;
1721 if (cur_line >= vtotal)
1722 time_to_vsync = line_time * vtotal;
1723 else
1724 time_to_vsync = line_time * (vtotal - cur_line);
1725
1726 if (time_to_vsync == 0) {
1727 DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
1728 vtotal);
1729 return -EINVAL;
1730 }
1731
1732 cur_time = ktime_get();
1733 *wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
1734
1735 DPU_DEBUG_ENC(dpu_enc,
1736 "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
1737 cur_line, vtotal, time_to_vsync,
1738 ktime_to_ms(cur_time),
1739 ktime_to_ms(*wakeup_time));
1740 return 0;
1741 }
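
/*
 * Continuing the illustrative 1080p60 numbers from above: with
 * line_time = 14817 ns, vtotal = 1125 and a current scanline of, say, 300,
 * time_to_vsync = 14817 * (1125 - 300) = 12224025 ns, so *wakeup_time is set
 * roughly 12.2 ms past ktime_get(). These figures are examples only.
 */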

static void dpu_encoder_vsync_event_handler(struct timer_list *t)
{
	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
			vsync_event_timer);
	struct drm_encoder *drm_enc = &dpu_enc->base;
	struct msm_drm_private *priv;
	struct msm_drm_thread *event_thread;

	if (!drm_enc->dev || !drm_enc->crtc) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	priv = drm_enc->dev->dev_private;

	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index\n");
		return;
	}
	event_thread = &priv->event_thread[drm_enc->crtc->index];
	if (!event_thread) {
		DPU_ERROR("event_thread not found for crtc:%d\n",
				drm_enc->crtc->index);
		return;
	}

	del_timer(&dpu_enc->vsync_event_timer);
}

static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
{
	struct dpu_encoder_virt *dpu_enc = container_of(work,
			struct dpu_encoder_virt, vsync_event_work);
	ktime_t wakeup_time;

	if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time))
		return;

	trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
	mod_timer(&dpu_enc->vsync_event_timer,
		  nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
}
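
/*
 * Note on the pair above: the timer callback only sanity-checks its context
 * and stops vsync_event_timer, while the kthread work recomputes the next
 * vsync wakeup via dpu_encoder_vsync_time() and re-arms the same timer.
 */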

void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	bool needs_hw_reset = false;
	unsigned int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));

	/* prepare for next kickoff, may include waiting on previous kickoff */
	DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.prepare_for_kickoff)
			phys->ops.prepare_for_kickoff(phys);
		if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
			needs_hw_reset = true;
	}
	DPU_ATRACE_END("enc_prepare_for_kickoff");

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);

	/* if any phys needs reset, reset all phys, in-order */
	if (needs_hw_reset) {
		trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
		}
	}
}
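
/*
 * Note: kickoff preparation above is deliberately two-pass. Every physical
 * encoder is first given a chance to prepare (and possibly flag
 * DPU_ENC_ERR_NEEDS_HW_RESET); only after all of them have been polled is a
 * HW reset applied to each phys in order, so the physical encoders backing
 * one virtual encoder are reset together rather than piecemeal.
 */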

void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	ktime_t wakeup_time;
	unsigned long timeout_ms;
	unsigned int i;

	DPU_ATRACE_BEGIN("encoder_kickoff");
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	trace_dpu_enc_kickoff(DRMID(drm_enc));

	timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
			drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);

	atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
	mod_timer(&dpu_enc->frame_done_timer,
			jiffies + msecs_to_jiffies(timeout_ms));

	/* All phys encs are ready to go, trigger the kickoff */
	_dpu_encoder_kickoff_phys(dpu_enc);

	/* allow phys encs to handle any post-kickoff business */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.handle_post_kickoff)
			phys->ops.handle_post_kickoff(phys);
	}

	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
			!dpu_encoder_vsync_time(drm_enc, &wakeup_time)) {
		trace_dpu_enc_early_kickoff(DRMID(drm_enc),
				ktime_to_ms(wakeup_time));
		mod_timer(&dpu_enc->vsync_event_timer,
				nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
	}

	DPU_ATRACE_END("encoder_kickoff");
}
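
/*
 * Frame-done watchdog arithmetic, for illustration: the timeout armed in
 * dpu_encoder_kickoff() is DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES (5) frame
 * periods, e.g. 5 * 1000 / 60 = 83 ms on a 60 Hz mode, or 5 * 1000 / 120
 * = 41 ms at 120 Hz. If no frame-done arrives within that window,
 * dpu_encoder_frame_done_timeout() below reports an error frame event.
 */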

void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.prepare_commit)
			phys->ops.prepare_commit(phys);
	}
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
	struct dpu_encoder_virt *dpu_enc = s->private;
	int i;

	mutex_lock(&dpu_enc->enc_lock);
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		seq_printf(s, "intf:%d vsync:%8d underrun:%8d ",
				phys->intf_idx - INTF_0,
				atomic_read(&phys->vsync_cnt),
				atomic_read(&phys->underrun_cnt));

		switch (phys->intf_mode) {
		case INTF_MODE_VIDEO:
			seq_puts(s, "mode: video\n");
			break;
		case INTF_MODE_CMD:
			seq_puts(s, "mode: command\n");
			break;
		default:
			seq_puts(s, "mode: ???\n");
			break;
		}
	}
	mutex_unlock(&dpu_enc->enc_lock);

	return 0;
}
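
/*
 * Given the seq_printf() format above, each physical encoder contributes one
 * line to the "status" file; a made-up example line would look like:
 *
 *   intf:1 vsync:    1234 underrun:       0 mode: video
 */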

DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);

static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int i;

	char name[DPU_NAME_SIZE];

	if (!drm_enc->dev) {
		DPU_ERROR("invalid encoder or kms\n");
		return -EINVAL;
	}

	snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);

	/* create overall sub-directory for the encoder */
	dpu_enc->debugfs_root = debugfs_create_dir(name,
			drm_enc->dev->primary->debugfs_root);

	/* don't error check these */
	debugfs_create_file("status", 0600,
			dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);

	for (i = 0; i < dpu_enc->num_phys_encs; i++)
		if (dpu_enc->phys_encs[i]->ops.late_register)
			dpu_enc->phys_encs[i]->ops.late_register(
					dpu_enc->phys_encs[i],
					dpu_enc->debugfs_root);

	return 0;
}
#else
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
	return 0;
}
#endif
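
/*
 * With CONFIG_DEBUG_FS enabled, the directory created above hangs off the
 * DRM primary minor, so (assuming debugfs is mounted in the usual place) the
 * status file typically shows up as
 * /sys/kernel/debug/dri/<minor>/encoder<id>/status, alongside whatever
 * entries each physical encoder adds through its late_register() op.
 */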

static int dpu_encoder_late_register(struct drm_encoder *encoder)
{
	return _dpu_encoder_init_debugfs(encoder);
}

static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);

	debugfs_remove_recursive(dpu_enc->debugfs_root);
}

static int dpu_encoder_virt_add_phys_encs(
		u32 display_caps,
		struct dpu_encoder_virt *dpu_enc,
		struct dpu_enc_phys_init_params *params)
{
	struct dpu_encoder_phys *enc = NULL;

	DPU_DEBUG_ENC(dpu_enc, "\n");

	/*
	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
	 * in this function, check up-front.
	 */
	if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
			ARRAY_SIZE(dpu_enc->phys_encs)) {
		DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
			  dpu_enc->num_phys_encs);
		return -EINVAL;
	}

	if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
		enc = dpu_encoder_phys_vid_init(params);

		if (IS_ERR_OR_NULL(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
				PTR_ERR(enc));
			return enc == NULL ? -EINVAL : PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	}

	if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
		enc = dpu_encoder_phys_cmd_init(params);

		if (IS_ERR_OR_NULL(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
				PTR_ERR(enc));
			return enc == NULL ? -EINVAL : PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	}

	if (params->split_role == ENC_ROLE_SLAVE)
		dpu_enc->cur_slave = enc;
	else
		dpu_enc->cur_master = enc;

	return 0;
}
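
/*
 * A display advertising both MSM_DISPLAY_CAP_VID_MODE and
 * MSM_DISPLAY_CAP_CMD_MODE therefore gets two physical encoders from a
 * single call (one video, one command), with cur_master/cur_slave left
 * pointing at the last one created; the NUM_PHYS_ENCODER_TYPES headroom
 * check at the top of the function keeps that from overflowing phys_encs[].
 */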

static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
	.handle_vblank_virt = dpu_encoder_vblank_callback,
	.handle_underrun_virt = dpu_encoder_underrun_callback,
	.handle_frame_done = dpu_encoder_frame_done_callback,
};

static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
				 struct dpu_kms *dpu_kms,
				 struct msm_display_info *disp_info)
{
	int ret = 0;
	int i = 0;
	enum dpu_intf_type intf_type = INTF_NONE;
	struct dpu_enc_phys_init_params phys_params;

	if (!dpu_enc) {
		DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
		return -EINVAL;
	}

	dpu_enc->cur_master = NULL;

	memset(&phys_params, 0, sizeof(phys_params));
	phys_params.dpu_kms = dpu_kms;
	phys_params.parent = &dpu_enc->base;
	phys_params.parent_ops = &dpu_encoder_parent_ops;
	phys_params.enc_spinlock = &dpu_enc->enc_spinlock;

	switch (disp_info->intf_type) {
	case DRM_MODE_ENCODER_DSI:
		intf_type = INTF_DSI;
		break;
	case DRM_MODE_ENCODER_TMDS:
		intf_type = INTF_DP;
		break;
	}

	WARN_ON(disp_info->num_of_h_tiles < 1);

	DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);

	if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
	    (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
		dpu_enc->idle_pc_supported =
				dpu_kms->catalog->caps->has_idle_pc;

	mutex_lock(&dpu_enc->enc_lock);
	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
		/*
		 * Left-most tile is at index 0, content is controller id
		 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
		 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
		 */
		u32 controller_id = disp_info->h_tile_instance[i];

		if (disp_info->num_of_h_tiles > 1) {
			if (i == 0)
				phys_params.split_role = ENC_ROLE_MASTER;
			else
				phys_params.split_role = ENC_ROLE_SLAVE;
		} else {
			phys_params.split_role = ENC_ROLE_SOLO;
		}

		DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
				i, controller_id, phys_params.split_role);

		phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
							    intf_type,
							    controller_id);
		if (phys_params.intf_idx == INTF_MAX) {
			DPU_ERROR_ENC(dpu_enc, "could not get intf: type %d, id %d\n",
					intf_type, controller_id);
			ret = -EINVAL;
		}

		if (!ret) {
			ret = dpu_encoder_virt_add_phys_encs(disp_info->capabilities,
							     dpu_enc,
							     &phys_params);
			if (ret)
				DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
		}
	}

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
		atomic_set(&phys->vsync_cnt, 0);
		atomic_set(&phys->underrun_cnt, 0);
	}
	mutex_unlock(&dpu_enc->enc_lock);

	return ret;
}
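
/*
 * Worked example (values are hypothetical): a dual-DSI panel described with
 * num_of_h_tiles = 2 and h_tile_instance = {0, 1} runs the loop above twice;
 * tile 0 becomes ENC_ROLE_MASTER on the interface backing DSI controller 0
 * and tile 1 becomes ENC_ROLE_SLAVE on the interface backing DSI controller
 * 1. A single-tile panel instead gets one ENC_ROLE_SOLO physical encoder.
 */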

static void dpu_encoder_frame_done_timeout(struct timer_list *t)
{
	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
			frame_done_timer);
	struct drm_encoder *drm_enc = &dpu_enc->base;
	u32 event;

	if (!drm_enc->dev) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
		DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
			      DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
		return;
	} else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
		DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
		return;
	}

	DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n");

	event = DPU_ENCODER_FRAME_EVENT_ERROR;
	trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
	dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
}
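
/*
 * Note: the atomic_xchg() above both reads and clears the armed timeout, so
 * if frame_done_timeout_ms has already been cleared by the time the timer
 * fires, the handler bails out quietly instead of reporting a spurious
 * DPU_ENCODER_FRAME_EVENT_ERROR.
 */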

static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
	.mode_set = dpu_encoder_virt_mode_set,
	.disable = dpu_encoder_virt_disable,
	.enable = dpu_kms_encoder_enable,
	.atomic_check = dpu_encoder_virt_atomic_check,

	/* This is called by dpu_kms_encoder_enable */
	.commit = dpu_encoder_virt_enable,
};

static const struct drm_encoder_funcs dpu_encoder_funcs = {
	.destroy = dpu_encoder_destroy,
	.late_register = dpu_encoder_late_register,
	.early_unregister = dpu_encoder_early_unregister,
};

int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
		struct msm_display_info *disp_info)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_encoder *drm_enc = NULL;
	struct dpu_encoder_virt *dpu_enc = NULL;
	int ret = 0;

	dpu_enc = to_dpu_encoder_virt(enc);

	ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
	if (ret)
		goto fail;

	atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
	timer_setup(&dpu_enc->frame_done_timer,
			dpu_encoder_frame_done_timeout, 0);

	if (disp_info->intf_type == DRM_MODE_ENCODER_DSI)
		timer_setup(&dpu_enc->vsync_event_timer,
				dpu_encoder_vsync_event_handler,
				0);
	else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS)
		dpu_enc->dp = priv->dp;

	INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
			dpu_encoder_off_work);
	dpu_enc->idle_timeout = IDLE_TIMEOUT;

	kthread_init_work(&dpu_enc->vsync_event_work,
			dpu_encoder_vsync_event_work_handler);

	memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));

	DPU_DEBUG_ENC(dpu_enc, "created\n");

	return ret;

fail:
	DPU_ERROR("failed to create encoder\n");
	if (drm_enc)
		dpu_encoder_destroy(drm_enc);

	return ret;
}

struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
		int drm_enc_mode)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int rc = 0;

	dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
	if (!dpu_enc)
		return ERR_PTR(-ENOMEM);

	rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
			      drm_enc_mode, NULL);
	if (rc) {
		devm_kfree(dev->dev, dpu_enc);
		return ERR_PTR(rc);
	}

	drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);

	spin_lock_init(&dpu_enc->enc_spinlock);
	dpu_enc->enabled = false;
	mutex_init(&dpu_enc->enc_lock);
	mutex_init(&dpu_enc->rc_lock);

	return &dpu_enc->base;
}
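
/*
 * Expected usage (a sketch of how these two entry points relate, not a
 * verbatim copy of any caller): the KMS setup code first calls
 * dpu_encoder_init() to allocate and register the drm_encoder, then calls
 * dpu_encoder_setup() on the returned encoder once the msm_display_info for
 * the attached DSI/DP interface is known, e.g.:
 *
 *	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
 *	if (IS_ERR(encoder))
 *		return PTR_ERR(encoder);
 *	rc = dpu_encoder_setup(dev, encoder, &info);
 */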

int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
	enum msm_event_wait event)
{
	int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i, ret = 0;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		switch (event) {
		case MSM_ENC_COMMIT_DONE:
			fn_wait = phys->ops.wait_for_commit_done;
			break;
		case MSM_ENC_TX_COMPLETE:
			fn_wait = phys->ops.wait_for_tx_complete;
			break;
		case MSM_ENC_VBLANK:
			fn_wait = phys->ops.wait_for_vblank;
			break;
		default:
			DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
					event);
			return -EINVAL;
		}

		if (fn_wait) {
			DPU_ATRACE_BEGIN("wait_for_completion_event");
			ret = fn_wait(phys);
			DPU_ATRACE_END("wait_for_completion_event");
			if (ret)
				return ret;
		}
	}

	return ret;
}

enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
{
	struct dpu_encoder_virt *dpu_enc = NULL;

	if (!encoder) {
		DPU_ERROR("invalid encoder\n");
		return INTF_MODE_NONE;
	}
	dpu_enc = to_dpu_encoder_virt(encoder);

	if (dpu_enc->cur_master)
		return dpu_enc->cur_master->intf_mode;

	if (dpu_enc->num_phys_encs)
		return dpu_enc->phys_encs[0]->intf_mode;

	return INTF_MODE_NONE;
}