// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/slab.h>

#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"

/*
 * Register offsets in MDSS register file for the interrupt registers
 * w.r.t. the MDP base
 */
#define MDP_SSPP_TOP0_OFF		0x0
#define MDP_INTF_0_OFF			0x6A000
#define MDP_INTF_1_OFF			0x6A800
#define MDP_INTF_2_OFF			0x6B000
#define MDP_INTF_3_OFF			0x6B800
#define MDP_INTF_4_OFF			0x6C000
#define MDP_INTF_5_OFF			0x6C800
#define INTF_INTR_EN			0x1c0
#define INTF_INTR_STATUS		0x1c4
#define INTF_INTR_CLEAR			0x1c8
#define MDP_AD4_0_OFF			0x7C000
#define MDP_AD4_1_OFF			0x7D000
#define MDP_AD4_INTR_EN_OFF		0x41c
#define MDP_AD4_INTR_CLEAR_OFF		0x424
#define MDP_AD4_INTR_STATUS_OFF		0x420
#define MDP_INTF_0_OFF_REV_7xxx		0x34000
#define MDP_INTF_1_OFF_REV_7xxx		0x35000
#define MDP_INTF_5_OFF_REV_7xxx		0x39000

/**
 * struct dpu_intr_reg - a single set of DPU interrupt registers
 * @clr_off:	offset to CLEAR reg
 * @en_off:	offset to ENABLE reg
 * @status_off:	offset to STATUS reg
 */
struct dpu_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
};

/*
 * dpu_intr_set - List of DPU interrupt registers
 *
 * When making changes, be sure to keep this list in sync with
 * dpu_hw_intr_reg
 */
static const struct dpu_intr_reg dpu_intr_set[] = {
	{
		MDP_SSPP_TOP0_OFF + INTR_CLEAR,
		MDP_SSPP_TOP0_OFF + INTR_EN,
		MDP_SSPP_TOP0_OFF + INTR_STATUS,
	},
	{
		MDP_SSPP_TOP0_OFF + INTR2_CLEAR,
		MDP_SSPP_TOP0_OFF + INTR2_EN,
		MDP_SSPP_TOP0_OFF + INTR2_STATUS,
	},
	{
		MDP_SSPP_TOP0_OFF + HIST_INTR_CLEAR,
		MDP_SSPP_TOP0_OFF + HIST_INTR_EN,
		MDP_SSPP_TOP0_OFF + HIST_INTR_STATUS,
	},
	{
		MDP_INTF_0_OFF + INTF_INTR_CLEAR,
		MDP_INTF_0_OFF + INTF_INTR_EN,
		MDP_INTF_0_OFF + INTF_INTR_STATUS,
	},
	{
		MDP_INTF_1_OFF + INTF_INTR_CLEAR,
		MDP_INTF_1_OFF + INTF_INTR_EN,
		MDP_INTF_1_OFF + INTF_INTR_STATUS,
	},
	{
		MDP_INTF_2_OFF + INTF_INTR_CLEAR,
		MDP_INTF_2_OFF + INTF_INTR_EN,
		MDP_INTF_2_OFF + INTF_INTR_STATUS,
	},
	{
		MDP_INTF_3_OFF + INTF_INTR_CLEAR,
		MDP_INTF_3_OFF + INTF_INTR_EN,
		MDP_INTF_3_OFF + INTF_INTR_STATUS,
	},
	{
		MDP_INTF_4_OFF + INTF_INTR_CLEAR,
		MDP_INTF_4_OFF + INTF_INTR_EN,
		MDP_INTF_4_OFF + INTF_INTR_STATUS,
	},
	{
		MDP_INTF_5_OFF + INTF_INTR_CLEAR,
		MDP_INTF_5_OFF + INTF_INTR_EN,
		MDP_INTF_5_OFF + INTF_INTR_STATUS,
	},
	{
		MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
		MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
		MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
	},
	{
		MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
		MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
		MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
	},
	{
		MDP_INTF_0_OFF_REV_7xxx + INTF_INTR_CLEAR,
		MDP_INTF_0_OFF_REV_7xxx + INTF_INTR_EN,
		MDP_INTF_0_OFF_REV_7xxx + INTF_INTR_STATUS,
	},
	{
		MDP_INTF_1_OFF_REV_7xxx + INTF_INTR_CLEAR,
		MDP_INTF_1_OFF_REV_7xxx + INTF_INTR_EN,
		MDP_INTF_1_OFF_REV_7xxx + INTF_INTR_STATUS,
	},
	{
		MDP_INTF_5_OFF_REV_7xxx + INTF_INTR_CLEAR,
		MDP_INTF_5_OFF_REV_7xxx + INTF_INTR_EN,
		MDP_INTF_5_OFF_REV_7xxx + INTF_INTR_STATUS,
	},
};

#define DPU_IRQ_REG(irq_idx)	((irq_idx) / 32)
#define DPU_IRQ_MASK(irq_idx)	(BIT((irq_idx) % 32))

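/*
 * A flat irq_idx encodes both the register set and the bit within it:
 * irq_idx = reg_idx * 32 + bit, as produced by DPU_IRQ_IDX() below.
 * DPU_IRQ_REG() recovers the index into dpu_intr_set[] and
 * DPU_IRQ_MASK() the bit mask within that register set; e.g. irq_idx 65
 * maps to bit 1 of register set 2 (the HIST_INTR set).
 */
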
static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
		int irq_idx)
{
	int reg_idx;

	if (!intr)
		return;

	reg_idx = DPU_IRQ_REG(irq_idx);
	DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off, DPU_IRQ_MASK(irq_idx));

	/* ensure register writes go through */
	wmb();
}

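/*
 * Walk every interrupt register set marked valid in irq_mask, mask the
 * latched status against the enable register, and invoke the callback
 * once per asserted bit with the flat irq_idx for that bit. Each
 * serviced interrupt is also cleared in hardware.
 */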
static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
		void (*cbfunc)(void *, int),
		void *arg)
{
	int reg_idx;
	int irq_idx;
	u32 irq_status;
	u32 enable_mask;
	int bit;
	unsigned long irq_flags;

	if (!intr)
		return;

	/*
	 * Read the latched status of each valid register set under
	 * irq_lock and find the matching IRQ lookup indices to dispatch.
	 */
	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
		if (!test_bit(reg_idx, &intr->irq_mask))
			continue;

		/* Read interrupt status */
		irq_status = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].status_off);

		/* Read enable mask */
		enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].en_off);

		/* and clear the interrupt */
		if (irq_status)
			DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
				     irq_status);

		/* Finally update IRQ status based on enable mask */
		irq_status &= enable_mask;

		if (!irq_status)
			continue;

		/*
		 * Search through the asserted status bits.
		 */
		while ((bit = ffs(irq_status)) != 0) {
			irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);
			/*
			 * Perform the callback, if one was provided, for
			 * the matched interrupt; the interrupt status is
			 * cleared here in either case.
			 */
			if (cbfunc)
				cbfunc(arg, irq_idx);

			dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);

			/*
			 * Clear the serviced bit in the local status copy
			 * so the search stops once all bits are handled.
			 */
			irq_status &= ~BIT(bit - 1);
		}
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}

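/*
 * Enable/disable keep a cached copy of each register set's enable mask
 * (cache_irq_mask), so the read-modify-write of the ENABLE register is
 * served from the cache rather than the hardware. That cache is what
 * makes holding irq_lock across these calls mandatory.
 */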
static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock, and it's the caller's responsibility to ensure
	 * the lock is held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &dpu_intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
		dbgstr = "DPU IRQ already set:";
	} else {
		dbgstr = "DPU IRQ enabled:";

		cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
		/* Enable interrupts based on the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock, and it's the caller's responsibility to ensure
	 * the lock is held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &dpu_intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
		dbgstr = "DPU IRQ is already cleared:";
	} else {
		dbgstr = "DPU IRQ mask disable:";

		cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
		/* Disable interrupts based on the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

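/*
 * The two helpers below blanket-clear and blanket-disable every
 * interrupt register set marked valid in irq_mask.
 */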
static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
{
	int i;

	if (!intr)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					dpu_intr_set[i].clr_off, 0xffffffff);
	}

	/* ensure register writes go through */
	wmb();

	return 0;
}

static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
{
	int i;

	if (!intr)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					dpu_intr_set[i].en_off, 0x00000000);
	}

	/* ensure register writes go through */
	wmb();

	return 0;
}

static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
		int irq_idx, bool clear)
{
	int reg_idx;
	unsigned long irq_flags;
	u32 intr_status;

	if (!intr)
		return 0;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return 0;
	}

	spin_lock_irqsave(&intr->irq_lock, irq_flags);

	reg_idx = DPU_IRQ_REG(irq_idx);
	intr_status = DPU_REG_READ(&intr->hw,
			dpu_intr_set[reg_idx].status_off) &
		DPU_IRQ_MASK(irq_idx);
	if (intr_status && clear)
		DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
				intr_status);

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return intr_status;
}

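/*
 * Expose irq_lock so callers can perform a sequence of *_irq_locked
 * calls atomically. A usage sketch (the irq_idx value is illustrative):
 *
 *	unsigned long flags = intr->ops.lock(intr);
 *
 *	intr->ops.enable_irq_locked(intr, irq_idx);
 *	intr->ops.unlock(intr, flags);
 */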
static unsigned long dpu_hw_intr_lock(struct dpu_hw_intr *intr)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);

	return irq_flags;
}

static void dpu_hw_intr_unlock(struct dpu_hw_intr *intr, unsigned long irq_flags)
{
	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}

static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
{
	ops->enable_irq_locked = dpu_hw_intr_enable_irq_locked;
	ops->disable_irq_locked = dpu_hw_intr_disable_irq_locked;
	ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
	ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
	ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
	ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
	ops->lock = dpu_hw_intr_lock;
	ops->unlock = dpu_hw_intr_unlock;
}

static void __intr_offset(struct dpu_mdss_cfg *m,
		void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
{
	hw->base_off = addr;
	hw->blk_off = m->mdp[0].base;
	hw->hwversion = m->hwversion;
}

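/*
 * A minimal bring-up sketch (the call site is illustrative, not taken
 * from this file):
 *
 *	struct dpu_hw_intr *intr = dpu_hw_intr_init(addr, m);
 *
 *	if (IS_ERR(intr))
 *		return PTR_ERR(intr);
 *	intr->ops.disable_all_irqs(intr);
 *	intr->ops.clear_all_irqs(intr);
 */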
struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
		struct dpu_mdss_cfg *m)
{
	struct dpu_hw_intr *intr;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
	if (!intr)
		return ERR_PTR(-ENOMEM);

	__intr_offset(m, addr, &intr->hw);
	__setup_intr_ops(&intr->ops);

	intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32;

	intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
			GFP_KERNEL);
	if (!intr->cache_irq_mask) {
		kfree(intr);
		return ERR_PTR(-ENOMEM);
	}

	intr->irq_mask = m->mdss_irqs;

	spin_lock_init(&intr->irq_lock);

	return intr;
}

void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
	if (intr) {
		kfree(intr->cache_irq_mask);
		kfree(intr);
	}
}