• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published by
8  * the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
20 
21 #include <drm/drm_crtc.h>
22 #include <linux/debugfs.h>
23 #include <linux/of_irq.h>
24 #include <linux/dma-buf.h>
25 
26 #include "msm_drv.h"
27 #include "msm_mmu.h"
28 #include "msm_gem.h"
29 
30 #include "dpu_kms.h"
31 #include "dpu_core_irq.h"
32 #include "dpu_formats.h"
33 #include "dpu_hw_vbif.h"
34 #include "dpu_vbif.h"
35 #include "dpu_encoder.h"
36 #include "dpu_plane.h"
37 #include "dpu_crtc.h"
38 
39 #define CREATE_TRACE_POINTS
40 #include "dpu_trace.h"
41 
/* IOMMU port names the display MMU is attached to / detached from */
static const char * const iommu_ports[] = {
		"mdp_0",
};
45 
46 /*
47  * To enable overall DRM driver logging
48  * # echo 0x2 > /sys/module/drm/parameters/debug
49  *
50  * To enable DRM driver h/w logging
51  * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
52  *
53  * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
54  */
55 #define DPU_DEBUGFS_DIR "msm_dpu"
56 #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
57 
58 static int dpu_kms_hw_init(struct msm_kms *kms);
59 static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
60 
dpu_iomap_size(struct platform_device * pdev,const char * name)61 static unsigned long dpu_iomap_size(struct platform_device *pdev,
62 				    const char *name)
63 {
64 	struct resource *res;
65 
66 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
67 	if (!res) {
68 		DRM_ERROR("failed to get memory resource: %s\n", name);
69 		return 0;
70 	}
71 
72 	return resource_size(res);
73 }
74 
75 #ifdef CONFIG_DEBUG_FS
/*
 * _dpu_danger_signal_status - dump danger or safe signal state to debugfs
 * @s: seq_file of the debugfs node being read
 * @danger_status: true to report danger signals, false to report safe signals
 *
 * Reads the MDP top-level status (with the device powered via pm_runtime)
 * and prints the MDP summary plus one entry per SSPP.
 *
 * Always returns 0 so the seq_file read completes even on invalid state.
 */
static int _dpu_danger_signal_status(struct seq_file *s,
		bool danger_status)
{
	struct dpu_kms *kms = (struct dpu_kms *)s->private;
	struct dpu_danger_safe_status status;
	int i;

	if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
		DPU_ERROR("invalid arg(s)\n");
		return 0;
	}

	memset(&status, 0, sizeof(struct dpu_danger_safe_status));

	pm_runtime_get_sync(&kms->pdev->dev);
	if (danger_status) {
		seq_puts(s, "\nDanger signal status:\n");
		if (kms->hw_mdp->ops.get_danger_status)
			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
					&status);
	} else {
		seq_puts(s, "\nSafe signal status:\n");
		/*
		 * Fix: this branch previously called get_danger_status(),
		 * so the "safe_status" debugfs node reported danger state
		 * instead of safe state.
		 */
		if (kms->hw_mdp->ops.get_safe_status)
			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
					&status);
	}
	pm_runtime_put_sync(&kms->pdev->dev);

	seq_printf(s, "MDP     :  0x%x\n", status.mdp);

	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
		seq_printf(s, "SSPP%d   :  0x%x  \t", i - SSPP_VIG0,
				status.sspp[i]);
	seq_puts(s, "\n");

	return 0;
}
115 
/*
 * DEFINE_DPU_DEBUGFS_SEQ_FOPS - generate single-open file_operations
 * @__prefix: name stem; expects an existing __prefix ## _show() seq handler
 *
 * Expands to a __prefix ## _open() wrapper around single_open() plus a
 * const __prefix ## _fops table suitable for debugfs_create_file().
 */
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}
128 
/* seq_file show hook backing the "danger_status" debugfs node */
static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, true);
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);

/* seq_file show hook backing the "safe_status" debugfs node */
static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, false);
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
140 
/* Remove the "danger" debugfs directory and every file beneath it. */
static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
{
	debugfs_remove_recursive(dpu_kms->debugfs_danger);
	dpu_kms->debugfs_danger = NULL;
}
146 
dpu_debugfs_danger_init(struct dpu_kms * dpu_kms,struct dentry * parent)147 static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
148 		struct dentry *parent)
149 {
150 	dpu_kms->debugfs_danger = debugfs_create_dir("danger",
151 			parent);
152 	if (!dpu_kms->debugfs_danger) {
153 		DPU_ERROR("failed to create danger debugfs\n");
154 		return -EINVAL;
155 	}
156 
157 	debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
158 			dpu_kms, &dpu_debugfs_danger_stats_fops);
159 	debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
160 			dpu_kms, &dpu_debugfs_safe_stats_fops);
161 
162 	return 0;
163 }
164 
/*
 * seq_file show handler that hex-dumps the 32-bit register range described
 * by the dpu_debugfs_regset32 stored in s->private.
 *
 * Output format: a "[offset]" tag at each 16-byte boundary followed by up
 * to four 8-digit register values. Returns 0 (empty output) on any invalid
 * or partially initialized state.
 */
static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
	struct dpu_debugfs_regset32 *regset;
	struct dpu_kms *dpu_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	void __iomem *base;
	uint32_t i, addr;

	if (!s || !s->private)
		return 0;

	regset = s->private;

	dpu_kms = regset->dpu_kms;
	if (!dpu_kms || !dpu_kms->mmio)
		return 0;

	dev = dpu_kms->dev;
	if (!dev)
		return 0;

	priv = dev->dev_private;
	if (!priv)
		return 0;

	/* regset->offset is relative to the start of the mapped MDP region */
	base = dpu_kms->mmio + regset->offset;

	/* insert padding spaces, if needed */
	if (regset->offset & 0xF) {
		seq_printf(s, "[%x]", regset->offset & ~0xF);
		/* 9 spaces = width of one " %08x" column, keeps rows aligned */
		for (i = 0; i < (regset->offset & 0xF); i += 4)
			seq_puts(s, "         ");
	}

	/* power the device while touching registers */
	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* main register output */
	for (i = 0; i < regset->blk_len; i += 4) {
		addr = regset->offset + i;
		if ((addr & 0xF) == 0x0)
			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
		seq_printf(s, " %08x", readl_relaxed(base + i));
	}
	seq_puts(s, "\n");
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;
}
214 
/* single_open() adapter for dpu_fops_regset32 */
static int dpu_debugfs_open_regset32(struct inode *inode,
		struct file *file)
{
	return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
}

/* read-only seq_file fops used by nodes from dpu_debugfs_create_regset32() */
static const struct file_operations dpu_fops_regset32 = {
	.open =		dpu_debugfs_open_regset32,
	.read =		seq_read,
	.llseek =	seq_lseek,
	.release =	single_release,
};
227 
dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 * regset,uint32_t offset,uint32_t length,struct dpu_kms * dpu_kms)228 void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
229 		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
230 {
231 	if (regset) {
232 		regset->offset = offset;
233 		regset->blk_len = length;
234 		regset->dpu_kms = dpu_kms;
235 	}
236 }
237 
/*
 * Create a debugfs file that hex-dumps @regset's register range.
 *
 * Return: the dentry from debugfs_create_file(), or NULL when the arguments
 * are invalid or the regset is not fully set up.
 */
void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
		void *parent, struct dpu_debugfs_regset32 *regset)
{
	if (!name)
		return NULL;
	if (!regset || !regset->dpu_kms || !regset->blk_len)
		return NULL;

	/* align the start down to a 4-byte register boundary */
	regset->offset = round_down(regset->offset, 4);

	return debugfs_create_file(name, mode, parent, regset,
				   &dpu_fops_regset32);
}
250 
_dpu_debugfs_init(struct dpu_kms * dpu_kms)251 static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
252 {
253 	void *p;
254 	int rc;
255 
256 	p = dpu_hw_util_get_log_mask_ptr();
257 
258 	if (!dpu_kms || !p)
259 		return -EINVAL;
260 
261 	dpu_kms->debugfs_root = debugfs_create_dir("debug",
262 					   dpu_kms->dev->primary->debugfs_root);
263 	if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
264 		DRM_ERROR("debugfs create_dir failed %ld\n",
265 			  PTR_ERR(dpu_kms->debugfs_root));
266 		return PTR_ERR(dpu_kms->debugfs_root);
267 	}
268 
269 	rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
270 	if (rc) {
271 		DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
272 		return rc;
273 	}
274 
275 	/* allow root to be NULL */
276 	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
277 
278 	(void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
279 	(void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
280 	(void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
281 
282 	rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
283 	if (rc) {
284 		DPU_ERROR("failed to init perf %d\n", rc);
285 		return rc;
286 	}
287 
288 	return 0;
289 }
290 
_dpu_debugfs_destroy(struct dpu_kms * dpu_kms)291 static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
292 {
293 	/* don't need to NULL check debugfs_root */
294 	if (dpu_kms) {
295 		dpu_debugfs_vbif_destroy(dpu_kms);
296 		dpu_debugfs_danger_destroy(dpu_kms);
297 		dpu_debugfs_core_irq_destroy(dpu_kms);
298 		debugfs_remove_recursive(dpu_kms->debugfs_root);
299 	}
300 }
301 #else
/* debugfs disabled (no CONFIG_DEBUG_FS): teardown is a no-op */
static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
{
}
305 #endif
306 
/* msm_kms hook: enable vblank event delivery on @crtc */
static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	return dpu_crtc_vblank(crtc, true);
}

/* msm_kms hook: disable vblank event delivery on @crtc */
static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	dpu_crtc_vblank(crtc, false);
}
316 
dpu_kms_prepare_commit(struct msm_kms * kms,struct drm_atomic_state * state)317 static void dpu_kms_prepare_commit(struct msm_kms *kms,
318 		struct drm_atomic_state *state)
319 {
320 	struct dpu_kms *dpu_kms;
321 	struct msm_drm_private *priv;
322 	struct drm_device *dev;
323 	struct drm_encoder *encoder;
324 
325 	if (!kms)
326 		return;
327 	dpu_kms = to_dpu_kms(kms);
328 	dev = dpu_kms->dev;
329 
330 	if (!dev || !dev->dev_private)
331 		return;
332 	priv = dev->dev_private;
333 	pm_runtime_get_sync(&dpu_kms->pdev->dev);
334 
335 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
336 		if (encoder->crtc != NULL)
337 			dpu_encoder_prepare_commit(encoder);
338 }
339 
340 /*
341  * Override the encoder enable since we need to setup the inline rotator and do
342  * some crtc magic before enabling any bridge that might be present.
343  */
void dpu_kms_encoder_enable(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
	struct drm_crtc *crtc = encoder->crtc;

	/* Forward this enable call to the commit hook */
	if (funcs && funcs->commit)
		funcs->commit(encoder);

	/* kick off the attached CRTC now, before any downstream bridge is
	 * enabled (see the comment above this function) */
	if (crtc && crtc->state->active) {
		trace_dpu_kms_enc_enable(DRMID(crtc));
		dpu_crtc_commit_kickoff(crtc);
	}
}
358 
dpu_kms_commit(struct msm_kms * kms,struct drm_atomic_state * state)359 static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
360 {
361 	struct drm_crtc *crtc;
362 	struct drm_crtc_state *crtc_state;
363 	int i;
364 
365 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
366 		/* If modeset is required, kickoff is run in encoder_enable */
367 		if (drm_atomic_crtc_needs_modeset(crtc_state))
368 			continue;
369 
370 		if (crtc->state->active) {
371 			trace_dpu_kms_commit(DRMID(crtc));
372 			dpu_crtc_commit_kickoff(crtc);
373 		}
374 	}
375 }
376 
dpu_kms_complete_commit(struct msm_kms * kms,struct drm_atomic_state * old_state)377 static void dpu_kms_complete_commit(struct msm_kms *kms,
378 		struct drm_atomic_state *old_state)
379 {
380 	struct dpu_kms *dpu_kms;
381 	struct msm_drm_private *priv;
382 	struct drm_crtc *crtc;
383 	struct drm_crtc_state *old_crtc_state;
384 	int i;
385 
386 	if (!kms || !old_state)
387 		return;
388 	dpu_kms = to_dpu_kms(kms);
389 
390 	if (!dpu_kms->dev || !dpu_kms->dev->dev_private)
391 		return;
392 	priv = dpu_kms->dev->dev_private;
393 
394 	DPU_ATRACE_BEGIN("kms_complete_commit");
395 
396 	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
397 		dpu_crtc_complete_commit(crtc, old_crtc_state);
398 
399 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
400 
401 	DPU_ATRACE_END("kms_complete_commit");
402 }
403 
/*
 * msm_kms hook: block until the most recent commit on @crtc has reached the
 * hardware. No-op when the CRTC is not enabled or not active; a timeout
 * (-EWOULDBLOCK) from an encoder wait is tolerated silently.
 */
static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state) {
		DPU_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;

	if (!crtc->state->enable) {
		DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	/* wait on every encoder currently driving this CRTC */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
		ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
		if (ret && ret != -EWOULDBLOCK) {
			DPU_ERROR("wait for commit done returned %d\n", ret);
			break;
		}
	}
}
444 
_dpu_kms_initialize_dsi(struct drm_device * dev,struct msm_drm_private * priv,struct dpu_kms * dpu_kms)445 static int _dpu_kms_initialize_dsi(struct drm_device *dev,
446 				    struct msm_drm_private *priv,
447 				    struct dpu_kms *dpu_kms)
448 {
449 	struct drm_encoder *encoder = NULL;
450 	int i, rc = 0;
451 
452 	if (!(priv->dsi[0] || priv->dsi[1]))
453 		return rc;
454 
455 	/*TODO: Support two independent DSI connectors */
456 	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
457 	if (IS_ERR(encoder)) {
458 		DPU_ERROR("encoder init failed for dsi display\n");
459 		return PTR_ERR(encoder);
460 	}
461 
462 	priv->encoders[priv->num_encoders++] = encoder;
463 
464 	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
465 		if (!priv->dsi[i])
466 			continue;
467 
468 		rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
469 		if (rc) {
470 			DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
471 				i, rc);
472 			break;
473 		}
474 	}
475 
476 	return rc;
477 }
478 
479 /**
480  * _dpu_kms_setup_displays - create encoders, bridges and connectors
481  *                           for underlying displays
482  * @dev:        Pointer to drm device structure
483  * @priv:       Pointer to private drm device data
484  * @dpu_kms:    Pointer to dpu kms structure
485  * Returns:     Zero on success
486  */
static int _dpu_kms_setup_displays(struct drm_device *dev,
				    struct msm_drm_private *priv,
				    struct dpu_kms *dpu_kms)
{
	/*
	 * Only DSI is wired up today; extend this function to initialize
	 * other display types (DP, HDMI, ...).
	 */

	return _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
}
498 
_dpu_kms_drm_obj_destroy(struct dpu_kms * dpu_kms)499 static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
500 {
501 	struct msm_drm_private *priv;
502 	int i;
503 
504 	if (!dpu_kms) {
505 		DPU_ERROR("invalid dpu_kms\n");
506 		return;
507 	} else if (!dpu_kms->dev) {
508 		DPU_ERROR("invalid dev\n");
509 		return;
510 	} else if (!dpu_kms->dev->dev_private) {
511 		DPU_ERROR("invalid dev_private\n");
512 		return;
513 	}
514 	priv = dpu_kms->dev->dev_private;
515 
516 	for (i = 0; i < priv->num_crtcs; i++)
517 		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
518 	priv->num_crtcs = 0;
519 
520 	for (i = 0; i < priv->num_planes; i++)
521 		priv->planes[i]->funcs->destroy(priv->planes[i]);
522 	priv->num_planes = 0;
523 
524 	for (i = 0; i < priv->num_connectors; i++)
525 		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
526 	priv->num_connectors = 0;
527 
528 	for (i = 0; i < priv->num_encoders; i++)
529 		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
530 	priv->num_encoders = 0;
531 }
532 
/*
 * _dpu_kms_drm_obj_init - create all DRM display objects for this device
 *
 * Creates encoders/connectors via the display drivers, then one plane per
 * SSPP (non-cursor planes become primaries until every potential CRTC has
 * one), one CRTC per primary plane, and finally marks every encoder as
 * compatible with every CRTC. On any failure every partially created
 * object is destroyed again.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_crtc *crtc;

	struct msm_drm_private *priv;
	struct dpu_mdss_cfg *catalog;

	int primary_planes_idx = 0, i, ret;
	int max_crtc_count;

	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
		DPU_ERROR("invalid dpu_kms\n");
		return -EINVAL;
	}

	dev = dpu_kms->dev;
	priv = dev->dev_private;
	catalog = dpu_kms->catalog;

	/*
	 * Create encoder and query display drivers to create
	 * bridges and connectors
	 */
	ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
	if (ret)
		goto fail;

	/* a CRTC needs both a mixer and an encoder */
	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);

	/* Create the planes */
	for (i = 0; i < catalog->sspp_count; i++) {
		bool primary = true;

		/* cursor pipes and surplus pipes become overlay planes */
		if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)
			|| primary_planes_idx >= max_crtc_count)
			primary = false;

		/* possible_crtcs mask: every CRTC that may exist */
		plane = dpu_plane_init(dev, catalog->sspp[i].id, primary,
				(1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			DPU_ERROR("dpu_plane_init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (primary)
			primary_planes[primary_planes_idx++] = plane;
	}

	/* can't have more CRTCs than primary planes were created */
	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = dpu_crtc_init(dev, primary_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/* All CRTCs are compatible with all encoders */
	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
fail:
	_dpu_kms_drm_obj_destroy(dpu_kms);
	return ret;
}
606 
607 #ifdef CONFIG_DEBUG_FS
dpu_kms_debugfs_init(struct msm_kms * kms,struct drm_minor * minor)608 static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
609 {
610 	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
611 	struct drm_device *dev;
612 	int rc;
613 
614 	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
615 		DPU_ERROR("invalid dpu_kms\n");
616 		return -EINVAL;
617 	}
618 
619 	dev = dpu_kms->dev;
620 
621 	rc = _dpu_debugfs_init(dpu_kms);
622 	if (rc)
623 		DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
624 
625 	return rc;
626 }
627 #endif
628 
/* msm_kms hook: DPU accepts any requested pixel clock rate unmodified */
static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}
634 
/*
 * _dpu_kms_hw_destroy - release all hardware resources owned by @dpu_kms
 *
 * Tears down interrupt handling, debugfs, the IOMMU mapping, VBIF objects,
 * the resource manager, catalog, power client and all register mappings.
 * Intended to be safe during error unwinding: most steps check whether
 * their resource was actually created and clear the pointer afterwards.
 */
static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
	struct drm_device *dev;
	int i;

	dev = dpu_kms->dev;
	if (!dev)
		return;

	if (dpu_kms->hw_intr)
		dpu_hw_intr_destroy(dpu_kms->hw_intr);
	dpu_kms->hw_intr = NULL;

	if (dpu_kms->power_event)
		dpu_power_handle_unregister_event(
				&dpu_kms->phandle, dpu_kms->power_event);

	/* safe to call these more than once during shutdown */
	_dpu_debugfs_destroy(dpu_kms);
	_dpu_kms_mmu_destroy(dpu_kms);

	if (dpu_kms->catalog) {
		for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
			u32 vbif_idx = dpu_kms->catalog->vbif[i].id;

			/* NOTE(review): hw_vbif[] slots are not NULLed here —
			 * confirm this teardown cannot run twice per slot */
			if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
				dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
		}
	}

	if (dpu_kms->rm_init)
		dpu_rm_destroy(&dpu_kms->rm);
	dpu_kms->rm_init = false;

	if (dpu_kms->catalog)
		dpu_hw_catalog_deinit(dpu_kms->catalog);
	dpu_kms->catalog = NULL;

	if (dpu_kms->core_client)
		dpu_power_client_destroy(&dpu_kms->phandle,
			dpu_kms->core_client);
	dpu_kms->core_client = NULL;

	if (dpu_kms->vbif[VBIF_NRT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
	dpu_kms->vbif[VBIF_NRT] = NULL;

	if (dpu_kms->vbif[VBIF_RT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
	dpu_kms->vbif[VBIF_RT] = NULL;

	if (dpu_kms->mmio)
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
	dpu_kms->mmio = NULL;
}
690 
/*
 * msm_kms destroy hook: tear down debug infrastructure and every DPU
 * hardware resource owned by this KMS instance.
 */
static void dpu_kms_destroy(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return;
	}

	dpu_kms = to_dpu_kms(kms);

	dpu_dbg_destroy();
	_dpu_kms_hw_destroy(dpu_kms);
}
705 
/*
 * dpu_kms_pm_suspend - msm_kms PM hook: save display state for suspend
 * @dev: device being suspended
 *
 * Stops hotplug polling, duplicates the current atomic state into
 * dpu_kms->suspend_state (restored by dpu_kms_pm_resume()) and intends to
 * commit a "disable all CRTCs" state before marking the KMS suspended.
 *
 * NOTE(review): num_crtcs is initialised to 0 and never incremented, so the
 * "nothing to do" branch is always taken and the disable-all commit below
 * is dead code — the loop that should collect active CRTCs into @state
 * appears to be missing. TODO: confirm against the reference implementation.
 *
 * NOTE(review): always returns 0, even when locking or state duplication
 * fails; callers cannot observe a suspend error.
 */
static int dpu_kms_pm_suspend(struct device *dev)
{
	struct drm_device *ddev;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct dpu_kms *dpu_kms;
	int ret = 0, num_crtcs = 0;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));

	/* disable hot-plug polling */
	drm_kms_helper_poll_disable(ddev);

	/* acquire modeset lock(s) */
	drm_modeset_acquire_init(&ctx, 0);

retry:
	DPU_ATRACE_BEGIN("kms_pm_suspend");

	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
	if (ret)
		goto unlock;

	/* save current state for resume */
	if (dpu_kms->suspend_state)
		drm_atomic_state_put(dpu_kms->suspend_state);
	dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
	if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
		DRM_ERROR("failed to back up suspend state\n");
		dpu_kms->suspend_state = NULL;
		goto unlock;
	}

	/* create atomic state to disable all CRTCs */
	state = drm_atomic_state_alloc(ddev);
	if (IS_ERR_OR_NULL(state)) {
		DRM_ERROR("failed to allocate crtc disable state\n");
		goto unlock;
	}

	state->acquire_ctx = &ctx;

	/* check for nothing to do */
	if (num_crtcs == 0) {
		DRM_DEBUG("all crtcs are already in the off state\n");
		drm_atomic_state_put(state);
		goto suspended;
	}

	/* commit the "disable all" state */
	ret = drm_atomic_commit(state);
	if (ret < 0) {
		DRM_ERROR("failed to disable crtcs, %d\n", ret);
		drm_atomic_state_put(state);
		goto unlock;
	}

suspended:
	dpu_kms->suspend_block = true;

unlock:
	if (ret == -EDEADLK) {
		/* lock contention: back off and retake all modeset locks */
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	DPU_ATRACE_END("kms_pm_suspend");
	return 0;
}
784 
/*
 * dpu_kms_pm_resume - msm_kms PM hook: restore display state after suspend
 * @dev: device being resumed
 *
 * Resets the mode config, commits the atomic state saved by
 * dpu_kms_pm_suspend() (if any) and re-enables hotplug polling.
 * Always returns 0; a failed restore is only logged.
 */
static int dpu_kms_pm_resume(struct device *dev)
{
	struct drm_device *ddev;
	struct dpu_kms *dpu_kms;
	int ret;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));

	DPU_ATRACE_BEGIN("kms_pm_resume");

	drm_mode_config_reset(ddev);

	drm_modeset_lock_all(ddev);

	dpu_kms->suspend_block = false;

	if (dpu_kms->suspend_state) {
		dpu_kms->suspend_state->acquire_ctx =
			ddev->mode_config.acquire_ctx;
		ret = drm_atomic_commit(dpu_kms->suspend_state);
		if (ret < 0) {
			DRM_ERROR("failed to restore state, %d\n", ret);
			/* NOTE(review): the state reference is only put on
			 * failure — confirm drm_atomic_commit() consumes it
			 * on success as this code assumes */
			drm_atomic_state_put(dpu_kms->suspend_state);
		}
		dpu_kms->suspend_state = NULL;
	}
	drm_modeset_unlock_all(ddev);

	/* enable hot-plug polling */
	drm_kms_helper_poll_enable(ddev);

	DPU_ATRACE_END("kms_pm_resume");
	return 0;
}
826 
/*
 * msm_kms hook: finish configuring a DPU encoder once the display driver
 * knows whether it runs in command or video mode. Builds an msm_display_info
 * (mode capability plus one horizontal tile per probed DSI host) and hands
 * it to dpu_encoder_setup(); failures are only logged.
 */
static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
				 struct drm_encoder *encoder,
				 bool cmd_mode)
{
	struct msm_display_info info;
	struct msm_drm_private *priv = encoder->dev->dev_private;
	int idx, rc;

	memset(&info, 0, sizeof(info));

	info.intf_type = encoder->encoder_type;
	if (cmd_mode)
		info.capabilities = MSM_DISPLAY_CAP_CMD_MODE;
	else
		info.capabilities = MSM_DISPLAY_CAP_VID_MODE;

	/* TODO: No support for DSI swap */
	for (idx = 0; idx < ARRAY_SIZE(priv->dsi); idx++) {
		if (!priv->dsi[idx])
			continue;
		info.h_tile_instance[info.num_of_h_tiles] = idx;
		info.num_of_h_tiles++;
	}

	rc = dpu_encoder_setup(encoder->dev, encoder, &info);
	if (rc)
		DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
			encoder->base.id, rc);
}
854 
/* msm_kms function table exported to the msm core for the DPU backend */
static const struct msm_kms_funcs kms_funcs = {
	.hw_init         = dpu_kms_hw_init,
	.irq_preinstall  = dpu_irq_preinstall,
	.irq_postinstall = dpu_irq_postinstall,
	.irq_uninstall   = dpu_irq_uninstall,
	.irq             = dpu_irq,
	.prepare_commit  = dpu_kms_prepare_commit,
	.commit          = dpu_kms_commit,
	.complete_commit = dpu_kms_complete_commit,
	.wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done,
	.enable_vblank   = dpu_kms_enable_vblank,
	.disable_vblank  = dpu_kms_disable_vblank,
	.check_modified_format = dpu_format_check_modified_format,
	.get_format      = dpu_get_msm_format,
	.round_pixclk    = dpu_kms_round_pixclk,
	.pm_suspend      = dpu_kms_pm_suspend,
	.pm_resume       = dpu_kms_pm_resume,
	.destroy         = dpu_kms_destroy,
	.set_encoder_mode = _dpu_kms_set_encoder_mode,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init    = dpu_kms_debugfs_init,
#endif
};
878 
/* caller must ensure the core clock is on before calling this */
static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
{
	/* hardware revision register sits at offset 0x0 of the MDP region */
	dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
}
884 
_dpu_kms_mmu_destroy(struct dpu_kms * dpu_kms)885 static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
886 {
887 	struct msm_mmu *mmu;
888 
889 	mmu = dpu_kms->base.aspace->mmu;
890 
891 	mmu->funcs->detach(mmu, (const char **)iommu_ports,
892 			ARRAY_SIZE(iommu_ports));
893 	msm_gem_address_space_put(dpu_kms->base.aspace);
894 
895 	return 0;
896 }
897 
_dpu_kms_mmu_init(struct dpu_kms * dpu_kms)898 static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
899 {
900 	struct iommu_domain *domain;
901 	struct msm_gem_address_space *aspace;
902 	int ret;
903 
904 	domain = iommu_domain_alloc(&platform_bus_type);
905 	if (!domain)
906 		return 0;
907 
908 	aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
909 			domain, "dpu1");
910 	if (IS_ERR(aspace)) {
911 		ret = PTR_ERR(aspace);
912 		goto fail;
913 	}
914 
915 	dpu_kms->base.aspace = aspace;
916 
917 	ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
918 			ARRAY_SIZE(iommu_ports));
919 	if (ret) {
920 		DPU_ERROR("failed to attach iommu %d\n", ret);
921 		msm_gem_address_space_put(aspace);
922 		goto fail;
923 	}
924 
925 	return 0;
926 fail:
927 	_dpu_kms_mmu_destroy(dpu_kms);
928 
929 	return ret;
930 }
931 
_dpu_kms_get_clk(struct dpu_kms * dpu_kms,char * clock_name)932 static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
933 		char *clock_name)
934 {
935 	struct dss_module_power *mp = &dpu_kms->mp;
936 	int i;
937 
938 	for (i = 0; i < mp->num_clk; i++) {
939 		if (!strcmp(mp->clk_config[i].clk_name, clock_name))
940 			return &mp->clk_config[i];
941 	}
942 
943 	return NULL;
944 }
945 
/*
 * Look up the current rate of the named DPU clock.
 *
 * Return: clock rate in Hz, or -EINVAL when the clock name is unknown.
 * NOTE(review): the return type is u64, so -EINVAL is seen by callers as a
 * huge positive value — confirm call sites check for it explicitly.
 */
u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
{
	struct dss_clk *clk;

	clk = _dpu_kms_get_clk(dpu_kms, clock_name);
	if (!clk)
		return -EINVAL;

	return clk_get_rate(clk->clk);
}
956 
/*
 * Power-event callback registered with the DPU power handle: once the
 * power rails are back up, reprogram the VBIF memory-type registers.
 */
static void dpu_kms_handle_power_event(u32 event_type, void *usr)
{
	struct dpu_kms *dpu_kms = usr;

	if (dpu_kms && event_type == DPU_POWER_EVENT_POST_ENABLE)
		dpu_vbif_init_memtypes(dpu_kms);
}
967 
dpu_kms_hw_init(struct msm_kms * kms)968 static int dpu_kms_hw_init(struct msm_kms *kms)
969 {
970 	struct dpu_kms *dpu_kms;
971 	struct drm_device *dev;
972 	struct msm_drm_private *priv;
973 	int i, rc = -EINVAL;
974 
975 	if (!kms) {
976 		DPU_ERROR("invalid kms\n");
977 		goto end;
978 	}
979 
980 	dpu_kms = to_dpu_kms(kms);
981 	dev = dpu_kms->dev;
982 	if (!dev) {
983 		DPU_ERROR("invalid device\n");
984 		goto end;
985 	}
986 
987 	rc = dpu_dbg_init(&dpu_kms->pdev->dev);
988 	if (rc) {
989 		DRM_ERROR("failed to init dpu dbg: %d\n", rc);
990 		goto end;
991 	}
992 
993 	priv = dev->dev_private;
994 	if (!priv) {
995 		DPU_ERROR("invalid private data\n");
996 		goto dbg_destroy;
997 	}
998 
999 	dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
1000 	if (IS_ERR(dpu_kms->mmio)) {
1001 		rc = PTR_ERR(dpu_kms->mmio);
1002 		DPU_ERROR("mdp register memory map failed: %d\n", rc);
1003 		dpu_kms->mmio = NULL;
1004 		goto error;
1005 	}
1006 	DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
1007 	dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");
1008 
1009 	dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
1010 	if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
1011 		rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
1012 		DPU_ERROR("vbif register memory map failed: %d\n", rc);
1013 		dpu_kms->vbif[VBIF_RT] = NULL;
1014 		goto error;
1015 	}
1016 	dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
1017 	dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
1018 	if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
1019 		dpu_kms->vbif[VBIF_NRT] = NULL;
1020 		DPU_DEBUG("VBIF NRT is not defined");
1021 	} else {
1022 		dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
1023 							     "vbif_nrt");
1024 	}
1025 
1026 	dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma");
1027 	if (IS_ERR(dpu_kms->reg_dma)) {
1028 		dpu_kms->reg_dma = NULL;
1029 		DPU_DEBUG("REG_DMA is not defined");
1030 	} else {
1031 		dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
1032 	}
1033 
1034 	dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
1035 					"core");
1036 	if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
1037 		rc = PTR_ERR(dpu_kms->core_client);
1038 		if (!dpu_kms->core_client)
1039 			rc = -EINVAL;
1040 		DPU_ERROR("dpu power client create failed: %d\n", rc);
1041 		dpu_kms->core_client = NULL;
1042 		goto error;
1043 	}
1044 
1045 	pm_runtime_get_sync(&dpu_kms->pdev->dev);
1046 
1047 	_dpu_kms_core_hw_rev_init(dpu_kms);
1048 
1049 	pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
1050 
1051 	dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
1052 	if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
1053 		rc = PTR_ERR(dpu_kms->catalog);
1054 		if (!dpu_kms->catalog)
1055 			rc = -EINVAL;
1056 		DPU_ERROR("catalog init failed: %d\n", rc);
1057 		dpu_kms->catalog = NULL;
1058 		goto power_error;
1059 	}
1060 
1061 	dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
1062 
1063 	/*
1064 	 * Now we need to read the HW catalog and initialize resources such as
1065 	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
1066 	 */
1067 	rc = _dpu_kms_mmu_init(dpu_kms);
1068 	if (rc) {
1069 		DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
1070 		goto power_error;
1071 	}
1072 
1073 	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
1074 			dpu_kms->dev);
1075 	if (rc) {
1076 		DPU_ERROR("rm init failed: %d\n", rc);
1077 		goto power_error;
1078 	}
1079 
1080 	dpu_kms->rm_init = true;
1081 
1082 	dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm);
1083 	if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) {
1084 		rc = PTR_ERR(dpu_kms->hw_mdp);
1085 		if (!dpu_kms->hw_mdp)
1086 			rc = -EINVAL;
1087 		DPU_ERROR("failed to get hw_mdp: %d\n", rc);
1088 		dpu_kms->hw_mdp = NULL;
1089 		goto power_error;
1090 	}
1091 
1092 	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
1093 		u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
1094 
1095 		dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx,
1096 				dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
1097 		if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
1098 			rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
1099 			if (!dpu_kms->hw_vbif[vbif_idx])
1100 				rc = -EINVAL;
1101 			DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
1102 			dpu_kms->hw_vbif[vbif_idx] = NULL;
1103 			goto power_error;
1104 		}
1105 	}
1106 
1107 	rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
1108 			&dpu_kms->phandle,
1109 			_dpu_kms_get_clk(dpu_kms, "core"));
1110 	if (rc) {
1111 		DPU_ERROR("failed to init perf %d\n", rc);
1112 		goto perf_err;
1113 	}
1114 
1115 	dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
1116 	if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
1117 		rc = PTR_ERR(dpu_kms->hw_intr);
1118 		DPU_ERROR("hw_intr init failed: %d\n", rc);
1119 		dpu_kms->hw_intr = NULL;
1120 		goto hw_intr_init_err;
1121 	}
1122 
1123 	/*
1124 	 * _dpu_kms_drm_obj_init should create the DRM related objects
1125 	 * i.e. CRTCs, planes, encoders, connectors and so forth
1126 	 */
1127 	rc = _dpu_kms_drm_obj_init(dpu_kms);
1128 	if (rc) {
1129 		DPU_ERROR("modeset init failed: %d\n", rc);
1130 		goto drm_obj_init_err;
1131 	}
1132 
1133 	dev->mode_config.min_width = 0;
1134 	dev->mode_config.min_height = 0;
1135 
1136 	/*
1137 	 * max crtc width is equal to the max mixer width * 2 and max height is
1138 	 * is 4K
1139 	 */
1140 	dev->mode_config.max_width =
1141 			dpu_kms->catalog->caps->max_mixer_width * 2;
1142 	dev->mode_config.max_height = 4096;
1143 
1144 	/*
1145 	 * Support format modifiers for compression etc.
1146 	 */
1147 	dev->mode_config.allow_fb_modifiers = true;
1148 
1149 	/*
1150 	 * Handle (re)initializations during power enable
1151 	 */
1152 	dpu_kms_handle_power_event(DPU_POWER_EVENT_POST_ENABLE, dpu_kms);
1153 	dpu_kms->power_event = dpu_power_handle_register_event(
1154 			&dpu_kms->phandle,
1155 			DPU_POWER_EVENT_POST_ENABLE,
1156 			dpu_kms_handle_power_event, dpu_kms, "kms");
1157 
1158 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
1159 
1160 	return 0;
1161 
1162 drm_obj_init_err:
1163 	dpu_core_perf_destroy(&dpu_kms->perf);
1164 hw_intr_init_err:
1165 perf_err:
1166 power_error:
1167 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
1168 error:
1169 	_dpu_kms_hw_destroy(dpu_kms);
1170 dbg_destroy:
1171 	dpu_dbg_destroy();
1172 end:
1173 	return rc;
1174 }
1175 
dpu_kms_init(struct drm_device * dev)1176 struct msm_kms *dpu_kms_init(struct drm_device *dev)
1177 {
1178 	struct msm_drm_private *priv;
1179 	struct dpu_kms *dpu_kms;
1180 	int irq;
1181 
1182 	if (!dev || !dev->dev_private) {
1183 		DPU_ERROR("drm device node invalid\n");
1184 		return ERR_PTR(-EINVAL);
1185 	}
1186 
1187 	priv = dev->dev_private;
1188 	dpu_kms = to_dpu_kms(priv->kms);
1189 
1190 	irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
1191 	if (irq < 0) {
1192 		DPU_ERROR("failed to get irq: %d\n", irq);
1193 		return ERR_PTR(irq);
1194 	}
1195 	dpu_kms->base.irq = irq;
1196 
1197 	return &dpu_kms->base;
1198 }
1199 
dpu_bind(struct device * dev,struct device * master,void * data)1200 static int dpu_bind(struct device *dev, struct device *master, void *data)
1201 {
1202 	struct drm_device *ddev = dev_get_drvdata(master);
1203 	struct platform_device *pdev = to_platform_device(dev);
1204 	struct msm_drm_private *priv = ddev->dev_private;
1205 	struct dpu_kms *dpu_kms;
1206 	struct dss_module_power *mp;
1207 	int ret = 0;
1208 
1209 	dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
1210 	if (!dpu_kms)
1211 		return -ENOMEM;
1212 
1213 	mp = &dpu_kms->mp;
1214 	ret = msm_dss_parse_clock(pdev, mp);
1215 	if (ret) {
1216 		DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
1217 		return ret;
1218 	}
1219 
1220 	dpu_power_resource_init(pdev, &dpu_kms->phandle);
1221 
1222 	platform_set_drvdata(pdev, dpu_kms);
1223 
1224 	msm_kms_init(&dpu_kms->base, &kms_funcs);
1225 	dpu_kms->dev = ddev;
1226 	dpu_kms->pdev = pdev;
1227 
1228 	pm_runtime_enable(&pdev->dev);
1229 	dpu_kms->rpm_enabled = true;
1230 
1231 	priv->kms = &dpu_kms->base;
1232 	return ret;
1233 }
1234 
dpu_unbind(struct device * dev,struct device * master,void * data)1235 static void dpu_unbind(struct device *dev, struct device *master, void *data)
1236 {
1237 	struct platform_device *pdev = to_platform_device(dev);
1238 	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
1239 	struct dss_module_power *mp = &dpu_kms->mp;
1240 
1241 	dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
1242 	msm_dss_put_clk(mp->clk_config, mp->num_clk);
1243 	devm_kfree(&pdev->dev, mp->clk_config);
1244 	mp->num_clk = 0;
1245 
1246 	if (dpu_kms->rpm_enabled)
1247 		pm_runtime_disable(&pdev->dev);
1248 }
1249 
/* DPU participates in the msm DRM device as a component sub-device. */
static const struct component_ops dpu_ops = {
	.bind   = dpu_bind,
	.unbind = dpu_unbind,
};
1254 
/*
 * Platform probe only registers the component; the real setup is
 * deferred to dpu_bind() when the master msm device assembles.
 */
static int dpu_dev_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &dpu_ops);
}
1259 
/* Unregister the component; teardown itself happens in dpu_unbind(). */
static int dpu_dev_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &dpu_ops);
	return 0;
}
1265 
dpu_runtime_suspend(struct device * dev)1266 static int __maybe_unused dpu_runtime_suspend(struct device *dev)
1267 {
1268 	int rc = -1;
1269 	struct platform_device *pdev = to_platform_device(dev);
1270 	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
1271 	struct drm_device *ddev;
1272 	struct dss_module_power *mp = &dpu_kms->mp;
1273 
1274 	ddev = dpu_kms->dev;
1275 	if (!ddev) {
1276 		DPU_ERROR("invalid drm_device\n");
1277 		goto exit;
1278 	}
1279 
1280 	rc = dpu_power_resource_enable(&dpu_kms->phandle,
1281 			dpu_kms->core_client, false);
1282 	if (rc)
1283 		DPU_ERROR("resource disable failed: %d\n", rc);
1284 
1285 	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
1286 	if (rc)
1287 		DPU_ERROR("clock disable failed rc:%d\n", rc);
1288 
1289 exit:
1290 	return rc;
1291 }
1292 
dpu_runtime_resume(struct device * dev)1293 static int __maybe_unused dpu_runtime_resume(struct device *dev)
1294 {
1295 	int rc = -1;
1296 	struct platform_device *pdev = to_platform_device(dev);
1297 	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
1298 	struct drm_device *ddev;
1299 	struct dss_module_power *mp = &dpu_kms->mp;
1300 
1301 	ddev = dpu_kms->dev;
1302 	if (!ddev) {
1303 		DPU_ERROR("invalid drm_device\n");
1304 		goto exit;
1305 	}
1306 
1307 	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
1308 	if (rc) {
1309 		DPU_ERROR("clock enable failed rc:%d\n", rc);
1310 		goto exit;
1311 	}
1312 
1313 	rc = dpu_power_resource_enable(&dpu_kms->phandle,
1314 			dpu_kms->core_client, true);
1315 	if (rc)
1316 		DPU_ERROR("resource enable failed: %d\n", rc);
1317 
1318 exit:
1319 	return rc;
1320 }
1321 
/* Only runtime PM is handled here; system sleep goes through the msm core. */
static const struct dev_pm_ops dpu_pm_ops = {
	SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
};

static const struct of_device_id dpu_dt_match[] = {
	{ .compatible = "qcom,sdm845-dpu", },
	{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);
1331 
/* Platform driver for the DPU sub-device of the msm DRM stack. */
static struct platform_driver dpu_driver = {
	.probe = dpu_dev_probe,
	.remove = dpu_dev_remove,
	.driver = {
		.name = "msm_dpu",
		.of_match_table = dpu_dt_match,
		.pm = &dpu_pm_ops,
	},
};
1341 
/*
 * Called from msm_drv module init to register the dpu platform driver.
 * NOTE(review): the platform_driver_register() return value is
 * discarded here -- presumably intentional since the caller has no
 * failure path, but worth confirming.
 */
void __init msm_dpu_register(void)
{
	platform_driver_register(&dpu_driver);
}
1346 
/* Called from msm_drv module exit to unregister the dpu platform driver. */
void __exit msm_dpu_unregister(void)
{
	platform_driver_unregister(&dpu_driver);
}
1351