// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/debugfs.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kthread.h>

#include "dpu_core_irq.h"
#include "dpu_trace.h"

/**
 * dpu_core_irq_callback_handler - dispatch core interrupts
 * @arg:		private data of callback handler
 * @irq_idx:		interrupt index
 */
static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
{
	struct dpu_kms *dpu_kms = arg;
	struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
	struct dpu_irq_callback *cb;
	unsigned long irq_flags;

	pr_debug("irq_idx=%d\n", irq_idx);

	if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
		DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx,
			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
	}

	atomic_inc(&irq_obj->irq_counts[irq_idx]);

	/*
	 * Invoke each callback registered for this interrupt index.
	 */
	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
	list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
		if (cb->func)
			cb->func(cb->arg, irq_idx);
	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);

	/*
	 * Clear pending interrupt status in HW.
	 * NOTE: dpu_core_irq_callback_handler is protected by top-level
	 *       spinlock, so it is safe to clear any interrupt status here.
	 */
	dpu_kms->hw_intr->ops.clear_intr_status_nolock(
			dpu_kms->hw_intr,
			irq_idx);
}

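/**
 * dpu_core_irq_idx_lookup - map an interrupt type and block instance to the
 *	HW interrupt index used by the other dpu_core_irq_* calls
 * @dpu_kms:		Pointer to dpu kms context
 * @intr_type:		DPU HW interrupt type
 * @instance_idx:	HW block instance index
 *
 * Return: irq_idx >= 0 on success, -EINVAL on invalid parameters.
 */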
int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
		enum dpu_intr_type intr_type, u32 instance_idx)
{
	if (!dpu_kms || !dpu_kms->hw_intr ||
			!dpu_kms->hw_intr->ops.irq_idx_lookup)
		return -EINVAL;

	return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
			instance_idx);
}

/**
 * _dpu_core_irq_enable - enable core interrupt given by the index
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idx:		interrupt index
 */
static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
{
	unsigned long irq_flags;
	int ret = 0, enable_count;

	if (!dpu_kms || !dpu_kms->hw_intr ||
			!dpu_kms->irq_obj.enable_counts ||
			!dpu_kms->irq_obj.irq_counts) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
	trace_dpu_core_irq_enable_idx(irq_idx, enable_count);

	if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
		ret = dpu_kms->hw_intr->ops.enable_irq(
				dpu_kms->hw_intr,
				irq_idx);
		if (ret)
			DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
					irq_idx);

		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);

		spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
		/* empty callback list but interrupt is enabled */
		if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]))
			DPU_ERROR("irq_idx=%d enabled with no callback\n",
					irq_idx);
		spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
	}

	return ret;
}

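/**
 * dpu_core_irq_enable - enable the given core interrupts, reference counted
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idxs:		Array of interrupt indices
 * @irq_count:		Number of entries in @irq_idxs
 *
 * Each index is enabled in HW only on the 0 -> 1 transition of its
 * reference count; further calls just bump the count.
 *
 * Return: 0 on success, otherwise the error from the first failing index.
 */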
int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
{
	int i, ret = 0, counts;

	if (!dpu_kms || !irq_idxs || !irq_count) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
	if (counts)
		DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);

	for (i = 0; (i < irq_count) && !ret; i++)
		ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);

	return ret;
}

/**
 * _dpu_core_irq_disable - disable core interrupt given by the index
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idx:		interrupt index
 */
static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
{
	int ret = 0, enable_count;

	if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
	trace_dpu_core_irq_disable_idx(irq_idx, enable_count);

	if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
		ret = dpu_kms->hw_intr->ops.disable_irq(
				dpu_kms->hw_intr,
				irq_idx);
		if (ret)
			DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
					irq_idx);
		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
	}

	return ret;
}

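/**
 * dpu_core_irq_disable - disable the given core interrupts, reference counted
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idxs:		Array of interrupt indices
 * @irq_count:		Number of entries in @irq_idxs
 *
 * Each index is disabled in HW only on the 1 -> 0 transition of its
 * reference count.
 *
 * Return: 0 on success, otherwise the error from the first failing index.
 */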
int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
{
	int i, ret = 0, counts;

	if (!dpu_kms || !irq_idxs || !irq_count) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
	if (counts == 2)
		DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);

	for (i = 0; (i < irq_count) && !ret; i++)
		ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);

	return ret;
}

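/**
 * dpu_core_irq_read - read interrupt status for the given interrupt index
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idx:		interrupt index
 * @clear:		true to also clear the status bits in HW after reading
 *
 * Return: the interrupt status reported by HW, or 0 on invalid parameters.
 */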
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
{
	if (!dpu_kms || !dpu_kms->hw_intr ||
			!dpu_kms->hw_intr->ops.get_interrupt_status)
		return 0;

	if (irq_idx < 0) {
		DPU_ERROR("[%pS] invalid irq_idx=%d\n",
				__builtin_return_address(0), irq_idx);
		return 0;
	}

	return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr,
			irq_idx, clear);
}

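/**
 * dpu_core_irq_register_callback - register a callback for an interrupt index
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idx:		interrupt index, e.g. from dpu_core_irq_idx_lookup()
 * @register_irq_cb:	callback to add to the irq_idx list; must stay valid
 *			until unregistered
 *
 * Registration alone does not unmask the interrupt in HW; callers are
 * expected to pair this with dpu_core_irq_enable()/dpu_core_irq_disable()
 * on the same index.
 *
 * Return: 0 on success, -EINVAL on invalid parameters.
 */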
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
		struct dpu_irq_callback *register_irq_cb)
{
	unsigned long irq_flags;

	if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (!register_irq_cb || !register_irq_cb->func) {
		DPU_ERROR("invalid irq_cb:%d func:%d\n",
				register_irq_cb != NULL,
				register_irq_cb ?
					register_irq_cb->func != NULL : -1);
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
	trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
	list_del_init(&register_irq_cb->list);
	list_add_tail(&register_irq_cb->list,
			&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);

	return 0;
}

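/**
 * dpu_core_irq_unregister_callback - remove a previously registered callback
 * @dpu_kms:		Pointer to dpu kms context
 * @irq_idx:		interrupt index
 * @register_irq_cb:	callback previously passed to
 *			dpu_core_irq_register_callback()
 *
 * Warns if the callback list for @irq_idx becomes empty while the interrupt
 * is still enabled.
 *
 * Return: 0 on success, -EINVAL on invalid parameters.
 */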
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
		struct dpu_irq_callback *register_irq_cb)
{
	unsigned long irq_flags;

	if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}

	if (!register_irq_cb || !register_irq_cb->func) {
		DPU_ERROR("invalid irq_cb:%d func:%d\n",
				register_irq_cb != NULL,
				register_irq_cb ?
					register_irq_cb->func != NULL : -1);
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
	trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
	list_del_init(&register_irq_cb->list);
	/* empty callback list but interrupt is still enabled */
	if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]))
		DPU_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);

	return 0;
}

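/* Clear any pending interrupt status in HW, across all interrupt sources */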
static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
{
	if (!dpu_kms || !dpu_kms->hw_intr ||
			!dpu_kms->hw_intr->ops.clear_all_irqs)
		return;

	dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
}

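/* Mask all interrupt sources in HW */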
static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
	if (!dpu_kms || !dpu_kms->hw_intr ||
			!dpu_kms->hw_intr->ops.disable_all_irqs)
		return;

	dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
}

#ifdef CONFIG_DEBUG_FS
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}

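/*
 * Print, for each irq_idx with any activity, the number of interrupts
 * received, the enable reference count, and the number of registered
 * callbacks.
 */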
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
	struct dpu_irq *irq_obj = s->private;
	struct dpu_irq_callback *cb;
	unsigned long irq_flags;
	int i, irq_count, enable_count, cb_count;

	if (WARN_ON(!irq_obj->enable_counts || !irq_obj->irq_cb_tbl))
		return 0;

	for (i = 0; i < irq_obj->total_irqs; i++) {
		spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
		cb_count = 0;
		irq_count = atomic_read(&irq_obj->irq_counts[i]);
		enable_count = atomic_read(&irq_obj->enable_counts[i]);
		list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
			cb_count++;
		spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);

		if (irq_count || enable_count || cb_count)
			seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
					i, irq_count, enable_count, cb_count);
	}

	return 0;
}

DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);

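/**
 * dpu_debugfs_core_irq_init - register the "core_irq" debugfs file
 * @dpu_kms:		Pointer to dpu kms context
 * @parent:		debugfs directory to create the file under
 */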
void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	debugfs_create_file("core_irq", 0600, parent, &dpu_kms->irq_obj,
		&dpu_debugfs_core_irq_fops);
}
#endif

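/**
 * dpu_core_irq_preinstall - pre-install the core IRQ handling
 * @dpu_kms:		Pointer to dpu kms context
 *
 * Clears and masks all HW interrupt sources, then allocates and initializes
 * the per-irq_idx callback lists and counters.
 */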
void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
{
	struct msm_drm_private *priv;
	int i;

	if (!dpu_kms->dev) {
		DPU_ERROR("invalid drm device\n");
		return;
	} else if (!dpu_kms->dev->dev_private) {
		DPU_ERROR("invalid device private\n");
		return;
	}
	priv = dpu_kms->dev->dev_private;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	dpu_clear_all_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	spin_lock_init(&dpu_kms->irq_obj.cb_lock);

	/* Create irq callbacks for all possible irq_idx */
	dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->irq_idx_tbl_size;
	dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
			sizeof(struct list_head), GFP_KERNEL);
	dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
			sizeof(atomic_t), GFP_KERNEL);
	dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
			sizeof(atomic_t), GFP_KERNEL);
	for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
		INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
		atomic_set(&dpu_kms->irq_obj.enable_counts[i], 0);
		atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
	}
}

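/**
 * dpu_core_irq_uninstall - tear down the core IRQ handling
 * @dpu_kms:		Pointer to dpu kms context
 *
 * Warns about any irq_idx still enabled or with callbacks registered, masks
 * all HW interrupt sources, and frees the callback tables and counters.
 */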
void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
{
	struct msm_drm_private *priv;
	int i;

	if (!dpu_kms->dev) {
		DPU_ERROR("invalid drm device\n");
		return;
	} else if (!dpu_kms->dev->dev_private) {
		DPU_ERROR("invalid device private\n");
		return;
	}
	priv = dpu_kms->dev->dev_private;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
		if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
				!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
			DPU_ERROR("irq_idx=%d still enabled/registered\n", i);

	dpu_clear_all_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	kfree(dpu_kms->irq_obj.irq_cb_tbl);
	kfree(dpu_kms->irq_obj.enable_counts);
	kfree(dpu_kms->irq_obj.irq_counts);
	dpu_kms->irq_obj.irq_cb_tbl = NULL;
	dpu_kms->irq_obj.enable_counts = NULL;
	dpu_kms->irq_obj.irq_counts = NULL;
	dpu_kms->irq_obj.total_irqs = 0;
}

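/**
 * dpu_core_irq - top-level core interrupt handler
 * @dpu_kms:		Pointer to dpu kms context
 *
 * Latches the interrupt statuses from HW, then dispatches each fired
 * interrupt to dpu_core_irq_callback_handler().
 *
 * Return: IRQ_HANDLED
 */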
irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
{
	/*
	 * Read the interrupt status from all sources. The statuses are
	 * stored within hw_intr, and this call also clears them in HW
	 * after reading. An individual interrupt status bit is only
	 * stored if that interrupt is enabled.
	 */
	dpu_kms->hw_intr->ops.get_interrupt_statuses(dpu_kms->hw_intr);

	/*
	 * Dispatch to the HW driver to look up which interrupts fired.
	 * For each match, the HW driver calls dpu_core_irq_callback_handler
	 * with the irq_idx from the lookup table; the handler invokes the
	 * registered callbacks and then clears the interrupt status.
	 */
	dpu_kms->hw_intr->ops.dispatch_irqs(
			dpu_kms->hw_intr,
			dpu_core_irq_callback_handler,
			dpu_kms);

	return IRQ_HANDLED;
}