1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3  * Copyright (c) 2021 Rockchip Electronics Co., Ltd
4  *
5  * author:
6  *	Herman Chen <herman.chen@rock-chips.com>
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/interrupt.h>
11 #include <linux/pm_runtime.h>
12 #include <linux/slab.h>
13 #include <soc/rockchip/pm_domains.h>
14 #include <soc/rockchip/rockchip_dmc.h>
15 #include <soc/rockchip/rockchip_iommu.h>
16 
17 #include "mpp_rkvdec2_link.h"
18 
19 #define WORK_TIMEOUT_MS		(200)
20 #define WAIT_TIMEOUT_MS		(500)
21 
22 #define RKVDEC_MAX_WRITE_PART	6
23 #define RKVDEC_MAX_READ_PART	2
24 
25 struct rkvdec_link_part {
26 	/* register offset of table buffer */
27 	u32 tb_reg_off;
28 	/* start idx of task register */
29 	u32 reg_start;
30 	/* number of task register */
31 	u32 reg_num;
32 };
33 
34 struct rkvdec_link_info {
35 	dma_addr_t iova;
36 	/* total register for link table buffer */
37 	u32 tb_reg_num;
38 	/* next link table addr in table buffer */
39 	u32 tb_reg_next;
40 	/* current read back addr in table buffer */
41 	u32 tb_reg_r;
42 	/* secondary enable in table buffer */
43 	u32 tb_reg_second_en;
44 	u32 part_w_num;
45 	u32 part_r_num;
46 
47 	struct rkvdec_link_part part_w[RKVDEC_MAX_WRITE_PART];
48 	struct rkvdec_link_part part_r[RKVDEC_MAX_READ_PART];
49 
50 	/* interrupt read back in table buffer */
51 	u32 tb_reg_int;
52 };
53 
54 static struct rkvdec_link_info rkvdec_link_v2_hw_info = {
55 	.tb_reg_num = 218,
56 	.tb_reg_next = 0,
57 	.tb_reg_r = 1,
58 	.tb_reg_second_en = 8,
59 
60 	.part_w_num = 6,
61 	.part_r_num = 2,
62 	.part_w[0] = {
63 		.tb_reg_off = 4,
64 		.reg_start = 8,
65 		.reg_num = 28,
66 	},
67 	.part_w[1] = {
68 		.tb_reg_off = 32,
69 		.reg_start = 64,
70 		.reg_num = 52,
71 	},
72 	.part_w[2] = {
73 		.tb_reg_off = 84,
74 		.reg_start = 128,
75 		.reg_num = 16,
76 	},
77 	.part_w[3] = {
78 		.tb_reg_off = 100,
79 		.reg_start = 160,
80 		.reg_num = 48,
81 	},
82 	.part_w[4] = {
83 		.tb_reg_off = 148,
84 		.reg_start = 224,
85 		.reg_num = 16,
86 	},
87 	.part_w[5] = {
88 		.tb_reg_off = 164,
89 		.reg_start = 256,
90 		.reg_num = 16,
91 	},
92 	.part_r[0] = {
93 		.tb_reg_off = 180,
94 		.reg_start = 224,
95 		.reg_num = 10,
96 	},
97 	.part_r[1] = {
98 		.tb_reg_off = 190,
99 		.reg_start = 258,
100 		.reg_num = 28,
101 	},
102 	.tb_reg_int = 180,
103 };
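/*
 * Layout note: each link table node is a flat array of tb_reg_num 32-bit
 * words in DMA memory. The part_w[] ranges describe which task registers
 * are copied into the node before it is handed to hardware, and the
 * part_r[] ranges describe which words are copied back into the task
 * registers once the node has been decoded (see
 * rkvdec_link_write_task_to_slot() and rkvdec2_link_finish() below).
 */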
104 
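/*
 * The error and enable bits are sampled repeatedly (up to 10 times) until
 * two consecutive reads agree, presumably to filter out values that are
 * still being updated by the running link hardware.
 */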
105 static void rkvdec_link_status_update(struct rkvdec_link_dev *dev)
106 {
107 	void __iomem *reg_base = dev->reg_base;
108 	u32 error_ff0, error_ff1;
109 	u32 enable_ff0, enable_ff1;
110 	u32 loop_count = 10;
111 	u32 val;
112 
113 	error_ff1 = (readl(reg_base + RKVDEC_LINK_DEC_NUM_BASE) &
114 		    RKVDEC_LINK_BIT_DEC_ERROR) ? 1 : 0;
115 	enable_ff1 = readl(reg_base + RKVDEC_LINK_EN_BASE);
116 
117 	dev->irq_status = readl(reg_base + RKVDEC_LINK_IRQ_BASE);
118 	dev->iova_curr = readl(reg_base + RKVDEC_LINK_CFG_ADDR_BASE);
119 	dev->link_mode = readl(reg_base + RKVDEC_LINK_MODE_BASE);
120 	dev->total = readl(reg_base + RKVDEC_LINK_TOTAL_NUM_BASE);
121 	dev->iova_next = readl(reg_base + RKVDEC_LINK_NEXT_ADDR_BASE);
122 
123 	do {
124 		val = readl(reg_base + RKVDEC_LINK_DEC_NUM_BASE);
125 		error_ff0 = (val & RKVDEC_LINK_BIT_DEC_ERROR) ? 1 : 0;
126 		enable_ff0 = readl(reg_base + RKVDEC_LINK_EN_BASE);
127 
128 		if (error_ff0 == error_ff1 && enable_ff0 == enable_ff1)
129 			break;
130 
131 		error_ff1 = error_ff0;
132 		enable_ff1 = enable_ff0;
133 	} while (--loop_count);
134 
135 	dev->error = error_ff0;
136 	dev->decoded_status = val;
137 	dev->decoded = RKVDEC_LINK_GET_DEC_NUM(val);
138 	dev->enabled = enable_ff0;
139 
140 	if (!loop_count)
141 		dev_info(dev->dev, "link status still unstable after 10 reads\n");
142 }
143 
144 static void rkvdec_link_node_dump(const char *func, struct rkvdec_link_dev *dev)
145 {
146 	u32 *table_base = (u32 *)dev->table->vaddr;
147 	u32 reg_count = dev->link_reg_count;
148 	u32 iova = (u32)dev->table->iova;
149 	u32 *reg = NULL;
150 	u32 i, j;
151 
152 	for (i = 0; i < dev->task_size; i++) {
153 		reg = table_base + i * reg_count;
154 
155 		mpp_err("slot %d link config iova %08x:\n", i,
156 			iova + i * dev->link_node_size);
157 
158 		for (j = 0; j < reg_count; j++) {
159 			mpp_err("reg%03d 0x%08x\n", j, reg[j]);
160 			udelay(100);
161 		}
162 	}
163 }
164 
165 static void rkvdec_core_reg_dump(const char *func, struct rkvdec_link_dev *dev)
166 {
167 	struct mpp_dev *mpp = dev->mpp;
168 	u32 s = mpp->var->hw_info->reg_start;
169 	u32 e = mpp->var->hw_info->reg_end;
170 	u32 i;
171 
172 	mpp_err("--- dump hardware register ---\n");
173 
174 	for (i = s; i <= e; i++) {
175 		u32 reg = i * sizeof(u32);
176 
177 		mpp_err("reg[%03d]: %04x: 0x%08x\n",
178 			i, reg, readl_relaxed(mpp->reg_base + reg));
179 		udelay(100);
180 	}
181 }
182 
183 static void rkvdec_link_reg_dump(const char *func, struct rkvdec_link_dev *dev)
184 {
185 	mpp_err("dump link config status from %s\n", func);
186 	mpp_err("reg 0 %08x - irq status\n", dev->irq_status);
187 	mpp_err("reg 1 %08x - cfg addr\n", dev->iova_curr);
188 	mpp_err("reg 2 %08x - link mode\n", dev->link_mode);
189 	mpp_err("reg 4 %08x - decoded num\n", dev->decoded_status);
190 	mpp_err("reg 5 %08x - total num\n", dev->total);
191 	mpp_err("reg 6 %08x - link mode en\n", dev->enabled);
192 	mpp_err("reg 6 %08x - next ltb addr\n", dev->iova_next);
193 }
194 
195 static void rkvdec_link_counter(const char *func, struct rkvdec_link_dev *dev)
196 {
197 	mpp_err("dump link counter from %s\n", func);
198 
199 	mpp_err("task write %d read %d send %d recv %d run %d decoded %d total %d\n",
200 		dev->task_write, dev->task_read, dev->task_send, dev->task_recv,
201 		dev->task_to_run, dev->task_decoded, dev->task_total);
202 }
203 
204 int rkvdec_link_dump(struct mpp_dev *mpp)
205 {
206 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
207 	struct rkvdec_link_dev *dev = dec->link_dec;
208 
209 	rkvdec_link_status_update(dev);
210 	rkvdec_link_reg_dump(__func__, dev);
211 	rkvdec_link_counter(__func__, dev);
212 	rkvdec_core_reg_dump(__func__, dev);
213 	rkvdec_link_node_dump(__func__, dev);
214 
215 	return 0;
216 }
217 
218 static int rkvdec_link_get_task_write(struct rkvdec_link_dev *dev)
219 {
220 	int idx = dev->task_write < dev->task_size ? dev->task_write :
221 		  dev->task_write - dev->task_size;
222 
223 	return idx;
224 }
225 static int rkvdec_link_inc_task_write(struct rkvdec_link_dev *dev)
226 {
227 	int task_write = rkvdec_link_get_task_write(dev);
228 
229 	dev->task_write++;
230 	if (dev->task_write >= dev->task_size * 2)
231 		dev->task_write = 0;
232 
233 	return task_write;
234 }
235 static int rkvdec_link_get_task_read(struct rkvdec_link_dev *dev)
236 {
237 	int idx = dev->task_read < dev->task_size ? dev->task_read :
238 		  dev->task_read - dev->task_size;
239 
240 	return idx;
241 }
242 static int rkvdec_link_inc_task_read(struct rkvdec_link_dev *dev)
243 {
244 	int task_read = rkvdec_link_get_task_read(dev);
245 
246 	dev->task_read++;
247 	if (dev->task_read >= dev->task_size * 2)
248 		dev->task_read = 0;
249 
250 	return task_read;
251 }
252 static int rkvdec_link_get_task_hw_queue_length(struct rkvdec_link_dev *dev)
253 {
254 	int len;
255 
256 	if (dev->task_send <= dev->task_recv)
257 		len = dev->task_send + dev->task_size - dev->task_recv;
258 	else
259 		len = dev->task_send - dev->task_recv - dev->task_size;
260 
261 	return len;
262 }
263 static int rkvdec_link_get_task_send(struct rkvdec_link_dev *dev)
264 {
265 	int idx = dev->task_send < dev->task_size ? dev->task_send :
266 		  dev->task_send - dev->task_size;
267 
268 	return idx;
269 }
270 static int rkvdec_link_inc_task_send(struct rkvdec_link_dev *dev)
271 {
272 	int task_send = rkvdec_link_get_task_send(dev);
273 
274 	dev->task_send++;
275 	if (dev->task_send >= dev->task_size * 2)
276 		dev->task_send = 0;
277 
278 	return task_send;
279 }
280 static int rkvdec_link_inc_task_recv(struct rkvdec_link_dev *dev)
281 {
282 	int task_recv = dev->task_recv;
283 
284 	dev->task_recv++;
285 	if (dev->task_recv >= dev->task_size * 2)
286 		dev->task_recv = 0;
287 
288 	return task_recv;
289 }
290 
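/*
 * The task_write/task_read/task_send/task_recv counters above run over
 * [0, 2 * task_size) and wrap at twice the ring size; the get_* helpers
 * fold them back into a slot index. Keeping a double-length count appears
 * to be how a full ring (write == read) is told apart from an empty one,
 * given that task_read and task_recv start at task_size (see
 * rkvdec2_link_alloc_table()).
 */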
291 static int rkvdec_link_get_next_slot(struct rkvdec_link_dev *dev)
292 {
293 	int next = -1;
294 
295 	if (dev->task_write == dev->task_read)
296 		return next;
297 
298 	next = rkvdec_link_get_task_write(dev);
299 
300 	return next;
301 }
302 
303 static int rkvdec_link_write_task_to_slot(struct rkvdec_link_dev *dev, int idx,
304 					  struct mpp_task *mpp_task)
305 {
306 	u32 i, off, s, n;
307 	struct rkvdec_link_part *part;
308 	struct rkvdec_link_info *info;
309 	struct mpp_dma_buffer *table;
310 	struct rkvdec2_task *task;
311 	int slot_idx;
312 	u32 *tb_reg;
313 
314 	if (idx < 0 || idx >= dev->task_size) {
315 		mpp_err("send invalid task index %d\n", idx);
316 		return -1;
317 	}
318 
319 	info = dev->info;
320 	part = info->part_w;
321 	table = dev->table;
322 	task = to_rkvdec2_task(mpp_task);
323 
324 	slot_idx = rkvdec_link_inc_task_write(dev);
325 	if (idx != slot_idx)
326 		dev_info(dev->dev, "slot index mismatch %d vs %d\n",
327 			 idx, slot_idx);
328 
329 	if (task->need_hack) {
330 		tb_reg = (u32 *)table->vaddr + slot_idx * dev->link_reg_count;
331 
332 		/* setup error mode flag */
333 		dev->tasks_hw[slot_idx] = NULL;
334 		dev->task_to_run++;
335 		dev->task_prepared++;
336 		slot_idx = rkvdec_link_inc_task_write(dev);
337 	}
338 
339 	tb_reg = (u32 *)table->vaddr + slot_idx * dev->link_reg_count;
340 
341 	for (i = 0; i < info->part_w_num; i++) {
342 		off = part[i].tb_reg_off;
343 		s = part[i].reg_start;
344 		n = part[i].reg_num;
345 		memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
346 	}
347 
348 	/* setup error mode flag */
349 	tb_reg[9] |= BIT(18) | BIT(9);
350 	tb_reg[info->tb_reg_second_en] |= RKVDEC_WAIT_RESET_EN;
351 
352 	/* memset read registers */
353 	part = info->part_r;
354 	for (i = 0; i < info->part_r_num; i++) {
355 		off = part[i].tb_reg_off;
356 		n = part[i].reg_num;
357 		memset(&tb_reg[off], 0, n * sizeof(u32));
358 	}
359 
360 	dev->tasks_hw[slot_idx] = mpp_task;
361 	task->slot_idx = slot_idx;
362 	dev->task_to_run++;
363 	dev->task_prepared++;
364 	mpp_dbg_link_flow("slot %d write task %d\n", slot_idx,
365 			  mpp_task->task_id);
366 
367 	return 0;
368 }
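/*
 * When a task is flagged need_hack, an extra slot is consumed first with
 * tasks_hw[] left NULL; the ISR later treats such empty slots as "stuff"
 * tasks (see rkvdec_link_isr_recv_task()), presumably to pad the link
 * chain before the real task.
 */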
369 
370 static void rkvdec2_clear_cache(struct mpp_dev *mpp)
371 {
372 	/* set cache size */
373 	u32 reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS |
374 		  RKVDEC_CACHE_PERMIT_READ_ALLOCATE;
375 
376 	if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
377 		reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;
378 
379 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
380 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
381 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE2_SIZE_BASE, reg);
382 
383 	/* clear cache */
384 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
385 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
386 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);
387 }
388 
389 static int rkvdec_link_send_task_to_hw(struct rkvdec_link_dev *dev,
390 				       struct mpp_task *mpp_task,
391 				       int slot_idx, u32 task_to_run,
392 				       int resend)
393 {
394 	void __iomem *reg_base = dev->reg_base;
395 	struct mpp_dma_buffer *table = dev->table;
396 	u32 task_total = dev->task_total;
397 	u32 mode_start = 0;
398 	u32 val;
399 
400 	/* write address */
401 	if (!task_to_run || task_to_run > dev->task_size ||
402 	    slot_idx < 0 || slot_idx >= dev->task_size) {
403 		mpp_err("invalid task send cfg at %d count %d\n",
404 			slot_idx, task_to_run);
405 		rkvdec_link_counter("error on send", dev);
406 		return 0;
407 	}
408 
409 	val = task_to_run;
410 	if (!task_total || resend)
411 		mode_start = 1;
412 
413 	if (mode_start) {
414 		u32 iova = table->iova + slot_idx * dev->link_node_size;
415 
416 		rkvdec2_clear_cache(dev->mpp);
417 		/* cleanup counter in hardware */
418 		writel(0, reg_base + RKVDEC_LINK_MODE_BASE);
419 		/* make sure the zero count is written before triggering config done */
420 		wmb();
421 		writel(RKVDEC_LINK_BIT_CFG_DONE, reg_base + RKVDEC_LINK_CFG_CTRL_BASE);
422 		/* write zero count config */
423 		wmb();
424 		/* clear counter and enable link mode hardware */
425 		writel(RKVDEC_LINK_BIT_EN, reg_base + RKVDEC_LINK_EN_BASE);
426 
427 		dev->task_total = 0;
428 		dev->task_decoded = 0;
429 
430 		writel_relaxed(iova, reg_base + RKVDEC_LINK_CFG_ADDR_BASE);
431 	} else {
432 		val |= RKVDEC_LINK_BIT_ADD_MODE;
433 	}
434 
435 	if (!resend) {
436 		u32 i;
437 
438 		for (i = 0; i < task_to_run; i++) {
439 			int next_idx = rkvdec_link_inc_task_send(dev);
440 			struct mpp_task *task_ddr = dev->tasks_hw[next_idx];
441 
442 			if (!task_ddr)
443 				continue;
444 
445 			set_bit(TASK_STATE_START, &task_ddr->state);
446 			schedule_delayed_work(&task_ddr->timeout_work,
447 					      msecs_to_jiffies(200));
448 		}
449 	} else {
450 		if (task_total)
451 			dev_info(dev->dev, "resend with total %d\n", task_total);
452 	}
453 
454 	/* set link mode */
455 	writel_relaxed(val, reg_base + RKVDEC_LINK_MODE_BASE);
456 
457 	/* make sure all registers are written before triggering config done */
458 	wmb();
459 
460 	/* configure done */
461 	writel(RKVDEC_LINK_BIT_CFG_DONE, reg_base + RKVDEC_LINK_CFG_CTRL_BASE);
462 
463 	mpp_dbg_link_flow("slot %d enable task %d mode %s\n", slot_idx,
464 			  task_to_run, mode_start ? "start" : "add");
465 	if (mode_start) {
466 		/* make sure all registers are written before starting the hardware */
467 		wmb();
468 		/* clear counter and enable link mode hardware */
469 		writel(RKVDEC_LINK_BIT_EN, reg_base + RKVDEC_LINK_EN_BASE);
470 	}
471 
472 	dev->task_total += task_to_run;
473 	return 0;
474 }
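/*
 * Two submit modes are used above: when the engine has no outstanding work
 * (task_total == 0) or a resend is requested after a reset, the link base
 * address is reprogrammed and link mode is (re)enabled ("start" mode);
 * otherwise only the new task count is written together with
 * RKVDEC_LINK_BIT_ADD_MODE so the tasks are appended to the running chain
 * ("add" mode).
 */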
475 
476 static int rkvdec2_link_finish(struct mpp_dev *mpp, struct mpp_task *mpp_task)
477 {
478 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
479 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
480 	struct rkvdec_link_dev *link_dec = dec->link_dec;
481 	struct mpp_dma_buffer *table = link_dec->table;
482 	struct rkvdec_link_info *info = link_dec->info;
483 	struct rkvdec_link_part *part = info->part_r;
484 	int slot_idx = task->slot_idx;
485 	u32 *tb_reg = (u32 *)(table->vaddr + slot_idx * link_dec->link_node_size);
486 	u32 off, s, n;
487 	u32 i;
488 
489 	mpp_debug_enter();
490 
491 	for (i = 0; i < info->part_r_num; i++) {
492 		off = part[i].tb_reg_off;
493 		s = part[i].reg_start;
494 		n = part[i].reg_num;
495 		memcpy(&task->reg[s], &tb_reg[off], n * sizeof(u32));
496 	}
497 
498 	mpp_debug_leave();
499 
500 	return 0;
501 }
502 
503 static int rkvdec_link_isr_recv_task(struct mpp_dev *mpp,
504 				     struct rkvdec_link_dev *link_dec,
505 				     int count)
506 {
507 	struct rkvdec_link_info *info = link_dec->info;
508 	u32 *table_base = (u32 *)link_dec->table->vaddr;
509 	int i;
510 
511 	for (i = 0; i < count; i++) {
512 		int idx = rkvdec_link_get_task_read(link_dec);
513 		struct mpp_task *mpp_task = link_dec->tasks_hw[idx];
514 		struct rkvdec2_task *task = NULL;
515 		u32 *regs = NULL;
516 		u32 irq_status = 0;
517 
518 		if (!mpp_task) {
519 			regs = table_base + idx * link_dec->link_reg_count;
520 			mpp_dbg_link_flow("slot %d read  task stuff\n", idx);
521 
522 			link_dec->stuff_total++;
523 			if (link_dec->statistic_count &&
524 			    regs[RKVDEC_LINK_REG_CYCLE_CNT]) {
525 				link_dec->stuff_cycle_sum +=
526 					regs[RKVDEC_LINK_REG_CYCLE_CNT];
527 				link_dec->stuff_cnt++;
528 				if (link_dec->stuff_cnt >=
529 				    link_dec->statistic_count) {
530 					dev_info(
531 						link_dec->dev, "hw cycle %u\n",
532 						(u32)(link_dec->stuff_cycle_sum /
533 						      link_dec->statistic_count));
534 					link_dec->stuff_cycle_sum = 0;
535 					link_dec->stuff_cnt = 0;
536 				}
537 			}
538 
539 			if (link_dec->error && (i == (count - 1))) {
540 				link_dec->stuff_err++;
541 
542 				irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
543 				dev_info(link_dec->dev, "found stuff task error irq %08x %u/%u\n",
544 					 irq_status, link_dec->stuff_err,
545 					 link_dec->stuff_total);
546 
547 				if (link_dec->stuff_on_error) {
548 					dev_info(link_dec->dev, "stuff task error again %u/%u\n",
549 						 link_dec->stuff_err,
550 						 link_dec->stuff_total);
551 				}
552 
553 				link_dec->stuff_on_error = 1;
554 				/* resend task */
555 				link_dec->decoded--;
556 			} else {
557 				link_dec->stuff_on_error = 0;
558 				rkvdec_link_inc_task_recv(link_dec);
559 				rkvdec_link_inc_task_read(link_dec);
560 				link_dec->task_running--;
561 				link_dec->task_prepared--;
562 			}
563 
564 			continue;
565 		}
566 
567 		task = to_rkvdec2_task(mpp_task);
568 		regs = table_base + idx * link_dec->link_reg_count;
569 		irq_status = regs[info->tb_reg_int];
570 		mpp_dbg_link_flow("slot %d rd task %d\n", idx,
571 				  mpp_task->task_id);
572 
573 		task->irq_status = irq_status;
574 
575 		cancel_delayed_work_sync(&mpp_task->timeout_work);
576 		set_bit(TASK_STATE_HANDLE, &mpp_task->state);
577 
578 		if (link_dec->statistic_count &&
579 		    regs[RKVDEC_LINK_REG_CYCLE_CNT]) {
580 			link_dec->task_cycle_sum +=
581 				regs[RKVDEC_LINK_REG_CYCLE_CNT];
582 			link_dec->task_cnt++;
583 			if (link_dec->task_cnt >= link_dec->statistic_count) {
584 				dev_info(link_dec->dev, "hw cycle %u\n",
585 					 (u32)(link_dec->task_cycle_sum /
586 					       link_dec->statistic_count));
587 				link_dec->task_cycle_sum = 0;
588 				link_dec->task_cnt = 0;
589 			}
590 		}
591 
592 		rkvdec2_link_finish(mpp, mpp_task);
593 
594 		set_bit(TASK_STATE_FINISH, &mpp_task->state);
595 
596 		list_del_init(&mpp_task->queue_link);
597 		link_dec->task_running--;
598 		link_dec->task_prepared--;
599 
600 		rkvdec_link_inc_task_recv(link_dec);
601 		rkvdec_link_inc_task_read(link_dec);
602 
603 		if (test_bit(TASK_STATE_ABORT, &mpp_task->state))
604 			set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
605 
606 		set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
607 		/* Wake up the GET thread */
608 		wake_up(&task->wait);
609 	}
610 
611 	return 0;
612 }
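/*
 * Slots with a NULL tasks_hw[] entry are the padding ("stuff") nodes
 * written for need_hack tasks. They are normally just consumed, but if the
 * last counted node finished with an error, decoded is rolled back by one
 * so that the slot is resent instead of being retired.
 */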
613 
614 static void *rkvdec2_link_prepare(struct mpp_dev *mpp,
615 				  struct mpp_task *mpp_task)
616 {
617 	struct mpp_task *out_task = NULL;
618 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
619 	struct rkvdec_link_dev *link_dec = dec->link_dec;
620 	int ret = 0;
621 	int slot_idx;
622 
623 	mpp_debug_enter();
624 
625 	slot_idx = rkvdec_link_get_next_slot(link_dec);
626 	if (slot_idx < 0) {
627 		mpp_err("capacity %d running %d\n",
628 			mpp->task_capacity, link_dec->task_running);
629 		dev_err(link_dec->dev, "no slot to write on get next slot\n");
630 		goto done;
631 	}
632 
633 	ret = rkvdec_link_write_task_to_slot(link_dec, slot_idx, mpp_task);
634 	if (ret >= 0)
635 		out_task = mpp_task;
636 	else
637 		dev_err(mpp->dev, "no slot to write\n");
638 
639 done:
640 	mpp_debug_leave();
641 
642 	return out_task;
643 }
644 
645 static int rkvdec2_link_reset(struct mpp_dev *mpp)
646 {
647 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
648 
649 	dev_info(mpp->dev, "resetting...\n");
650 
651 	/* FIXME lock resource lock of the other devices in combo */
652 	mpp_iommu_down_write(mpp->iommu_info);
653 	mpp_reset_down_write(mpp->reset_group);
654 	atomic_set(&mpp->reset_request, 0);
655 
656 	rockchip_save_qos(mpp->dev);
657 
658 	mutex_lock(&dec->sip_reset_lock);
659 	rockchip_dmcfreq_lock();
660 	sip_smc_vpu_reset(0, 0, 0);
661 	rockchip_dmcfreq_unlock();
662 	mutex_unlock(&dec->sip_reset_lock);
663 
664 	rockchip_restore_qos(mpp->dev);
665 
666 	/* Note: if the domain does not change, the iommu attach returns as
667 	 * an empty operation. Therefore force a close and then an open so
668 	 * that the domain is updated and can really be attached.
669 	 */
670 	mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
671 
672 	mpp_reset_up_write(mpp->reset_group);
673 	mpp_iommu_up_write(mpp->iommu_info);
674 
675 	dev_info(mpp->dev, "reset done\n");
676 
677 	return 0;
678 }
679 
680 static int rkvdec2_link_irq(struct mpp_dev *mpp)
681 {
682 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
683 	struct rkvdec_link_dev *link_dec = dec->link_dec;
684 	u32 irq_status = 0;
685 
686 	if (!atomic_read(&link_dec->power_enabled)) {
687 		dev_info(link_dec->dev, "irq on power off\n");
688 		return -1;
689 	}
690 
691 	irq_status = readl(link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
692 
693 	if (irq_status & RKVDEC_LINK_BIT_IRQ_RAW) {
694 		u32 enabled = readl(link_dec->reg_base + RKVDEC_LINK_EN_BASE);
695 
696 		if (!enabled) {
697 			u32 bus = mpp_read_relaxed(mpp, 273 * 4);
698 
699 			if (bus & 0x7ffff)
700 				dev_info(link_dec->dev,
701 					 "invalid bus status %08x\n", bus);
702 		}
703 
704 		link_dec->irq_status = irq_status;
705 		mpp->irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
706 
707 		writel_relaxed(0, link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
708 	}
709 
710 	mpp_debug(DEBUG_IRQ_STATUS | DEBUG_LINK_TABLE, "irq_status: %08x : %08x\n",
711 		  irq_status, mpp->irq_status);
712 
713 	return 0;
714 }
715 
716 static int rkvdec2_link_isr(struct mpp_dev *mpp)
717 {
718 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
719 	struct rkvdec_link_dev *link_dec = dec->link_dec;
720 	/* keep irq_status */
721 	u32 irq_status = link_dec->irq_status;
722 	u32 prev_dec_num;
723 	int count = 0;
724 	u32 len = 0;
725 	u32 need_reset = atomic_read(&mpp->reset_request);
726 	u32 task_timeout = link_dec->task_on_timeout;
727 
728 	mpp_debug_enter();
729 
730 	disable_irq(mpp->irq);
731 	rkvdec_link_status_update(link_dec);
732 	link_dec->irq_status = irq_status;
733 	prev_dec_num = link_dec->task_decoded;
734 
735 	if (!link_dec->enabled || task_timeout) {
736 		u32 val;
737 
738 		if (task_timeout)
739 			rkvdec_link_reg_dump("timeout", link_dec);
740 
741 		val = mpp_read(mpp, 224 * 4);
742 		if (!(val & BIT(2))) {
743 			dev_info(mpp->dev, "frame not complete\n");
744 			link_dec->decoded++;
745 		}
746 	}
747 	count = (int)link_dec->decoded - (int)prev_dec_num;
748 
749 	/* handle counter wrap */
750 	if (link_dec->enabled && !count && !need_reset) {
751 		/* process extra isr when task is processed */
752 		enable_irq(mpp->irq);
753 		goto done;
754 	}
755 
756 	/* get previous ready task */
757 	if (count) {
758 		rkvdec_link_isr_recv_task(mpp, link_dec, count);
759 		link_dec->task_decoded = link_dec->decoded;
760 	}
761 
762 	if (!link_dec->enabled || need_reset)
763 		goto do_reset;
764 
765 	enable_irq(mpp->irq);
766 	goto done;
767 
768 do_reset:
769 	/* NOTE: irq may run with reset */
770 	atomic_inc(&mpp->reset_request);
771 	rkvdec2_link_reset(mpp);
772 	link_dec->task_decoded = 0;
773 	link_dec->task_total = 0;
774 	enable_irq(mpp->irq);
775 
776 	if (link_dec->total == link_dec->decoded)
777 		goto done;
778 
779 	len = rkvdec_link_get_task_hw_queue_length(link_dec);
780 	if (len > link_dec->task_size)
781 		rkvdec_link_counter("invalid len", link_dec);
782 
783 	if (len) {
784 		int slot_idx = rkvdec_link_get_task_read(link_dec);
785 		struct mpp_task *mpp_task = NULL;
786 
787 		mpp_task = link_dec->tasks_hw[slot_idx];
788 		rkvdec_link_send_task_to_hw(link_dec, mpp_task,
789 					    slot_idx, len, 1);
790 	}
791 
792 done:
793 	mpp_debug_leave();
794 
795 	return IRQ_HANDLED;
796 }
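/*
 * The bottom half above works from the decoded counter delta: finished
 * slots are retired via rkvdec_link_isr_recv_task(), and if the link core
 * stopped or a reset was requested, the hardware is reset and the
 * still-pending part of the ring is resent from the current read slot in
 * "start" mode.
 */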
797 
798 int rkvdec2_link_remove(struct mpp_dev *mpp, struct rkvdec_link_dev *link_dec)
799 {
800 	mpp_debug_enter();
801 
802 	if (link_dec && link_dec->table) {
803 		mpp_dma_free(link_dec->table);
804 		link_dec->table = NULL;
805 	}
806 
807 	mpp_debug_leave();
808 
809 	return 0;
810 }
811 
812 static int rkvdec2_link_alloc_table(struct mpp_dev *mpp,
813 				    struct rkvdec_link_dev *link_dec)
814 {
815 	int ret;
816 	struct mpp_dma_buffer *table;
817 	struct rkvdec_link_info *info = link_dec->info;
818 	/* NOTE: the link table address requires 64-byte alignment */
819 	u32 task_capacity = link_dec->task_capacity;
820 	u32 link_node_size = ALIGN(info->tb_reg_num * sizeof(u32), 256);
821 	u32 link_info_size = task_capacity * link_node_size;
822 	u32 *v_curr;
823 	u32 io_curr, io_next, io_start;
824 	u32 offset_r = info->part_r[0].tb_reg_off * sizeof(u32);
825 	u32 i;
826 
827 	table = mpp_dma_alloc(mpp->dev, link_info_size);
828 	if (!table) {
829 		ret = -ENOMEM;
830 		goto err_free_node;
831 	}
832 
833 	link_dec->link_node_size = link_node_size;
834 	link_dec->link_reg_count = link_node_size >> 2;
835 	io_start = table->iova;
836 
837 	for (i = 0; i < task_capacity; i++) {
838 		v_curr  = (u32 *)(table->vaddr + i * link_node_size);
839 		io_curr = io_start + i * link_node_size;
840 		io_next = (i == task_capacity - 1) ?
841 			  io_start : io_start + (i + 1) * link_node_size;
842 
843 		v_curr[info->tb_reg_next] = io_next;
844 		v_curr[info->tb_reg_r] = io_curr + offset_r;
845 	}
846 
847 	link_dec->table	     = table;
848 	link_dec->task_size  = task_capacity;
849 	link_dec->task_count = 0;
850 	link_dec->task_write = 0;
851 	link_dec->task_read  = link_dec->task_size;
852 	link_dec->task_send  = 0;
853 	link_dec->task_recv  = link_dec->task_size;
854 
855 	return 0;
856 err_free_node:
857 	rkvdec2_link_remove(mpp, link_dec);
858 	return ret;
859 }
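/*
 * Each node is rounded up to a 256-byte block and chained circularly via
 * its tb_reg_next word, while tb_reg_r points at the node's own read-back
 * area. task_read and task_recv start at task_size so that the
 * double-length ring counters described earlier start out with the whole
 * ring available.
 */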
860 
861 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
862 int rkvdec2_link_procfs_init(struct mpp_dev *mpp)
863 {
864 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
865 	struct rkvdec_link_dev *link_dec = dec->link_dec;
866 
867 	if (!link_dec)
868 		return 0;
869 
870 	link_dec->statistic_count = 0;
871 
872 	if (dec->procfs)
873 		mpp_procfs_create_u32("statistic_count", 0644,
874 				      dec->procfs, &link_dec->statistic_count);
875 
876 	return 0;
877 }
878 #else
879 int rkvdec2_link_procfs_init(struct mpp_dev *mpp)
880 {
881 	return 0;
882 }
883 #endif
884 
885 int rkvdec2_link_init(struct platform_device *pdev, struct rkvdec2_dev *dec)
886 {
887 	int ret;
888 	struct resource *res = NULL;
889 	struct rkvdec_link_dev *link_dec = NULL;
890 	struct device *dev = &pdev->dev;
891 	struct mpp_dev *mpp = &dec->mpp;
892 
893 	mpp_debug_enter();
894 
895 	link_dec = devm_kzalloc(dev, sizeof(*link_dec), GFP_KERNEL);
896 	if (!link_dec) {
897 		ret = -ENOMEM;
898 		goto done;
899 	}
900 
901 	link_dec->tasks_hw = devm_kzalloc(dev, sizeof(*link_dec->tasks_hw) *
902 					  mpp->task_capacity, GFP_KERNEL);
903 	if (!link_dec->tasks_hw) {
904 		ret = -ENOMEM;
905 		goto done;
906 	}
907 
908 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
909 	if (res)
910 		link_dec->info = &rkvdec_link_v2_hw_info;
911 	else {
912 		dev_err(dev, "link mode resource not found\n");
913 		ret = -ENOMEM;
914 		goto done;
915 	}
916 
917 	link_dec->reg_base = devm_ioremap(dev, res->start, resource_size(res));
918 	if (!link_dec->reg_base) {
919 		dev_err(dev, "ioremap failed for resource %pR\n", res);
920 		ret = -ENOMEM;
921 		goto done;
922 	}
923 
924 	link_dec->task_capacity = mpp->task_capacity;
925 	ret = rkvdec2_link_alloc_table(&dec->mpp, link_dec);
926 	if (ret)
927 		goto done;
928 
929 	link_dec->mpp = mpp;
930 	link_dec->dev = dev;
931 	atomic_set(&link_dec->task_timeout, 0);
932 	atomic_set(&link_dec->power_enabled, 0);
933 	link_dec->irq_enabled = 1;
934 
935 	dec->link_dec = link_dec;
936 	dev_info(dev, "link mode probe finish\n");
937 
938 done:
939 	if (ret) {
940 		if (link_dec) {
941 			if (link_dec->reg_base) {
942 				devm_iounmap(dev, link_dec->reg_base);
943 				link_dec->reg_base = NULL;
944 			}
945 			if (link_dec->tasks_hw) {
946 				devm_kfree(dev, link_dec->tasks_hw);
947 				link_dec->tasks_hw = NULL;
948 			}
949 
950 			devm_kfree(dev, link_dec);
951 			link_dec = NULL;
952 		}
953 		dec->link_dec = NULL;
954 	}
955 	mpp_debug_leave();
956 
957 	return ret;
958 }
959 
960 static void rkvdec2_link_free_task(struct kref *ref)
961 {
962 	struct mpp_dev *mpp;
963 	struct mpp_session *session;
964 	struct mpp_task *task = container_of(ref, struct mpp_task, ref);
965 
966 	if (!task->session) {
967 		mpp_err("task %d task->session is null.\n", task->task_id);
968 		return;
969 	}
970 	session = task->session;
971 
972 	mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d state 0x%lx\n",
973 		       session->index, task->task_id, task->state);
974 	if (!session->mpp) {
975 		mpp_err("session %d session->mpp is null.\n", session->index);
976 		return;
977 	}
978 	mpp = session->mpp;
979 	list_del_init(&task->queue_link);
980 
981 	rkvdec2_free_task(session, task);
982 	/* Decrease reference count */
983 	atomic_dec(&session->task_count);
984 	atomic_dec(&mpp->task_count);
985 }
986 
987 static void rkvdec2_link_trigger_work(struct mpp_dev *mpp)
988 {
989 	kthread_queue_work(&mpp->queue->worker, &mpp->work);
990 }
991 
992 static void rkvdec2_link_trigger_timeout(struct mpp_dev *mpp)
993 {
994 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
995 	struct rkvdec_link_dev *link_dec = dec->link_dec;
996 
997 	atomic_inc(&link_dec->task_timeout);
998 	rkvdec2_link_trigger_work(mpp);
999 }
1000 
1001 static void rkvdec2_link_trigger_irq(struct mpp_dev *mpp)
1002 {
1003 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1004 	struct rkvdec_link_dev *link_dec = dec->link_dec;
1005 
1006 	link_dec->task_irq++;
1007 	rkvdec2_link_trigger_work(mpp);
1008 }
1009 
1010 static int rkvdec2_link_power_on(struct mpp_dev *mpp)
1011 {
1012 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1013 	struct rkvdec_link_dev *link_dec = dec->link_dec;
1014 
1015 	if (!atomic_xchg(&link_dec->power_enabled, 1)) {
1016 		if (mpp_iommu_attach(mpp->iommu_info)) {
1017 			dev_err(mpp->dev, "mpp_iommu_attach failed\n");
1018 			return -ENODATA;
1019 		}
1020 		pm_runtime_get_sync(mpp->dev);
1021 		pm_stay_awake(mpp->dev);
1022 
1023 		if (mpp->hw_ops->clk_on)
1024 			mpp->hw_ops->clk_on(mpp);
1025 
1026 		if (!link_dec->irq_enabled) {
1027 			enable_irq(mpp->irq);
1028 			link_dec->irq_enabled = 1;
1029 		}
1030 	}
1031 	return 0;
1032 }
1033 
1034 static void rkvdec2_link_power_off(struct mpp_dev *mpp)
1035 {
1036 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1037 	struct rkvdec_link_dev *link_dec = dec->link_dec;
1038 
1039 	if (atomic_xchg(&link_dec->power_enabled, 0)) {
1040 		disable_irq(mpp->irq);
1041 		link_dec->irq_enabled = 0;
1042 
1043 		if (mpp->hw_ops->clk_off)
1044 			mpp->hw_ops->clk_off(mpp);
1045 
1046 		pm_relax(mpp->dev);
1047 		pm_runtime_put_sync_suspend(mpp->dev);
1048 
1049 		link_dec->task_decoded = 0;
1050 		link_dec->task_total = 0;
1051 	}
1052 }
1053 
1054 static void rkvdec2_link_timeout_proc(struct work_struct *work_s)
1055 {
1056 	struct mpp_dev *mpp;
1057 	struct mpp_session *session;
1058 	struct mpp_task *task = container_of(to_delayed_work(work_s),
1059 					     struct mpp_task, timeout_work);
1060 
1061 	if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
1062 		mpp_err("task %d state %lx has been handled\n",
1063 			task->task_id, task->state);
1064 		return;
1065 	}
1066 
1067 	if (!task->session) {
1068 		mpp_err("task %d session is null.\n", task->task_id);
1069 		return;
1070 	}
1071 	session = task->session;
1072 
1073 	if (!session->mpp) {
1074 		mpp_err("task %d:%d mpp is null.\n", session->index,
1075 			task->task_id);
1076 		return;
1077 	}
1078 	mpp = session->mpp;
1079 	set_bit(TASK_STATE_TIMEOUT, &task->state);
1080 	rkvdec2_link_trigger_timeout(mpp);
1081 }
1082 
1083 static void mpp_taskqueue_scan_pending_abort_task(struct mpp_taskqueue *queue)
1084 {
1085 	struct mpp_task *task, *n;
1086 
1087 	mutex_lock(&queue->pending_lock);
1088 	/* Check and pop all timeout task */
1089 	list_for_each_entry_safe(task, n, &queue->pending_list, queue_link) {
1090 		struct mpp_session *session = task->session;
1091 
1092 		if (test_bit(TASK_STATE_ABORT, &task->state)) {
1093 			mutex_lock(&session->pending_lock);
1094 			/* wait and signal */
1095 			list_del_init(&task->queue_link);
1096 			mutex_unlock(&session->pending_lock);
1097 			kref_put(&task->ref, rkvdec2_link_free_task);
1098 		}
1099 	}
1100 	mutex_unlock(&queue->pending_lock);
1101 }
1102 
1103 static void rkvdec2_link_try_dequeue(struct mpp_dev *mpp)
1104 {
1105 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1106 	struct rkvdec_link_dev *link_dec = dec->link_dec;
1107 	struct mpp_task *task;
1108 	struct mpp_taskqueue *queue = mpp->queue;
1109 	int task_irq = link_dec->task_irq;
1110 	int task_irq_prev = link_dec->task_irq_prev;
1111 	int task_timeout = atomic_read(&link_dec->task_timeout);
1112 
1113 	if (!link_dec->task_running)
1114 		goto done;
1115 
1116 	if (task_timeout != link_dec->task_timeout_prev) {
1117 		dev_info(link_dec->dev, "process task timeout\n");
1118 		atomic_inc(&mpp->reset_request);
1119 		link_dec->task_on_timeout =
1120 			task_timeout - link_dec->task_timeout_prev;
1121 		goto proc;
1122 	}
1123 
1124 	if (task_irq == task_irq_prev)
1125 		goto done;
1126 
1127 	if (!atomic_read(&link_dec->power_enabled)) {
1128 		dev_info(link_dec->dev, "dequeue on power off\n");
1129 		goto done;
1130 	}
1131 
1132 proc:
1133 	task = list_first_entry_or_null(&queue->running_list, struct mpp_task,
1134 					queue_link);
1135 	if (!task) {
1136 		mpp_err("cannot find task on try dequeue with %d running task\n",
1137 			link_dec->task_running);
1138 		goto done;
1139 	}
1140 
1141 	/* Check and process all finished task */
1142 	rkvdec2_link_isr(mpp);
1143 
1144 done:
1145 	link_dec->task_irq_prev = task_irq;
1146 	link_dec->task_timeout_prev = task_timeout;
1147 	link_dec->task_on_timeout = 0;
1148 
1149 	mpp_taskqueue_scan_pending_abort_task(queue);
1150 
1151 	/* TODO: if reset is needed do reset here */
1152 }
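/*
 * Dequeue is driven from the worker thread: the irq and timeout counters
 * are compared against their previous snapshots to decide whether there is
 * anything to collect, and if so rkvdec2_link_isr() is called as the
 * bottom half to retire finished tasks.
 */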
1153 
1154 static int mpp_task_queue(struct mpp_dev *mpp, struct mpp_task *task)
1155 {
1156 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1157 	struct rkvdec_link_dev *link_dec = dec->link_dec;
1158 	u32 task_to_run = 0;
1159 	int slot_idx = 0;
1160 
1161 	mpp_debug_enter();
1162 
1163 	rkvdec2_link_power_on(mpp);
1164 	mpp_time_record(task);
1165 	mpp_debug(DEBUG_TASK_INFO, "pid %d, start hw %s\n",
1166 		  task->session->pid, dev_name(mpp->dev));
1167 
1168 	/* prepare the task for running */
1169 	if (test_and_set_bit(TASK_STATE_PREPARE, &task->state))
1170 		mpp_err("task %d has been prepared twice\n", task->task_id);
1171 
1172 	rkvdec2_link_prepare(mpp, task);
1173 
1174 	task_to_run = link_dec->task_to_run;
1175 	if (!task_to_run) {
1176 		dev_err(link_dec->dev, "nothing to run\n");
1177 		goto done;
1178 	}
1179 
1180 	mpp_reset_down_read(mpp->reset_group);
1181 	link_dec->task_to_run = 0;
1182 	slot_idx = rkvdec_link_get_task_send(link_dec);
1183 	link_dec->task_running += task_to_run;
1184 	rkvdec_link_send_task_to_hw(link_dec, task, slot_idx, task_to_run, 0);
1185 
1186 done:
1187 	mpp_debug_leave();
1188 
1189 	return 0;
1190 }
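/*
 * Tasks prepared since the last submit are batched: task_to_run counts the
 * slots filled by rkvdec2_link_prepare() and the whole batch is handed to
 * hardware in one go, starting from the current send index.
 */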
1191 
1192 irqreturn_t rkvdec2_link_irq_proc(int irq, void *param)
1193 {
1194 	struct mpp_dev *mpp = param;
1195 	int ret = rkvdec2_link_irq(mpp);
1196 
1197 	if (!ret)
1198 		rkvdec2_link_trigger_irq(mpp);
1199 
1200 	return IRQ_HANDLED;
1201 }
1202 
1203 static struct mpp_task *
1204 mpp_session_get_pending_task(struct mpp_session *session)
1205 {
1206 	struct mpp_task *task = NULL;
1207 
1208 	mutex_lock(&session->pending_lock);
1209 	task = list_first_entry_or_null(&session->pending_list, struct mpp_task,
1210 					pending_link);
1211 	mutex_unlock(&session->pending_lock);
1212 
1213 	return task;
1214 }
1215 
1216 static int task_is_done(struct mpp_task *task)
1217 {
1218 	return test_bit(TASK_STATE_PROC_DONE, &task->state);
1219 }
1220 
1221 static int mpp_session_pop_pending(struct mpp_session *session,
1222 				   struct mpp_task *task)
1223 {
1224 	mutex_lock(&session->pending_lock);
1225 	list_del_init(&task->pending_link);
1226 	mutex_unlock(&session->pending_lock);
1227 	kref_put(&task->ref, rkvdec2_link_free_task);
1228 
1229 	return 0;
1230 }
1231 
1232 static int mpp_session_pop_done(struct mpp_session *session,
1233 				struct mpp_task *task)
1234 {
1235 	set_bit(TASK_STATE_DONE, &task->state);
1236 	kref_put(&task->ref, rkvdec2_link_free_task);
1237 
1238 	return 0;
1239 }
1240 
1241 int rkvdec2_link_process_task(struct mpp_session *session,
1242 			      struct mpp_task_msgs *msgs)
1243 {
1244 	struct mpp_task *task = NULL;
1245 	struct mpp_dev *mpp = session->mpp;
1246 
1247 	task = rkvdec2_alloc_task(session, msgs);
1248 	if (!task) {
1249 		mpp_err("alloc_task failed.\n");
1250 		return -ENOMEM;
1251 	}
1252 
1253 	kref_init(&task->ref);
1254 	atomic_set(&task->abort_request, 0);
1255 	task->task_index = atomic_fetch_inc(&mpp->task_index);
1256 	task->task_id = atomic_fetch_inc(&mpp->queue->task_id);
1257 	INIT_DELAYED_WORK(&task->timeout_work, rkvdec2_link_timeout_proc);
1258 
1259 	atomic_inc(&session->task_count);
1260 
1261 	kref_get(&task->ref);
1262 	mutex_lock(&session->pending_lock);
1263 	list_add_tail(&task->pending_link, &session->pending_list);
1264 	mutex_unlock(&session->pending_lock);
1265 
1266 	kref_get(&task->ref);
1267 	mutex_lock(&mpp->queue->pending_lock);
1268 	list_add_tail(&task->queue_link, &mpp->queue->pending_list);
1269 	mutex_unlock(&mpp->queue->pending_lock);
1270 
1271 	/* push current task to queue */
1272 	atomic_inc(&mpp->task_count);
1273 	set_bit(TASK_STATE_PENDING, &task->state);
1274 	/* trigger current queue to run task */
1275 	rkvdec2_link_trigger_work(mpp);
1276 	kref_put(&task->ref, rkvdec2_link_free_task);
1277 
1278 	return 0;
1279 }
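/*
 * Reference counting on submit: one reference is held by the session
 * pending list and one by the queue pending list, while the initial
 * reference from kref_init() is dropped at the end of this function once
 * the worker has been kicked.
 */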
1280 
1281 int rkvdec2_link_wait_result(struct mpp_session *session,
1282 			     struct mpp_task_msgs *msgs)
1283 {
1284 	struct mpp_dev *mpp = session->mpp;
1285 	struct mpp_task *mpp_task;
1286 	struct rkvdec2_task *task;
1287 	int ret;
1288 
1289 	mpp_task = mpp_session_get_pending_task(session);
1290 	if (!mpp_task) {
1291 		mpp_err("session %p pending list is empty!\n", session);
1292 		return -EIO;
1293 	}
1294 
1295 	task = to_rkvdec2_task(mpp_task);
1296 	ret = wait_event_timeout(task->wait, task_is_done(mpp_task),
1297 				 msecs_to_jiffies(WAIT_TIMEOUT_MS));
1298 	if (ret) {
1299 		ret = rkvdec2_result(mpp, mpp_task, msgs);
1300 
1301 		mpp_session_pop_done(session, mpp_task);
1302 	} else {
1303 		mpp_err("task %d:%d state %lx timeout -> abort\n",
1304 			session->index, mpp_task->task_id, mpp_task->state);
1305 
1306 		atomic_inc(&mpp_task->abort_request);
1307 		set_bit(TASK_STATE_ABORT, &mpp_task->state);
1308 	}
1309 
1310 	mpp_session_pop_pending(session, mpp_task);
1311 	return ret;
1312 }
1313 
1314 void rkvdec2_link_worker(struct kthread_work *work_s)
1315 {
1316 	struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
1317 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1318 	struct rkvdec_link_dev *link_dec = dec->link_dec;
1319 	struct mpp_task *task;
1320 	struct mpp_taskqueue *queue = mpp->queue;
1321 
1322 	mpp_debug_enter();
1323 
1324 	/*
1325 	 * process timeout and finished task.
1326 	 */
1327 	rkvdec2_link_try_dequeue(mpp);
1328 
1329 again:
1330 	if (atomic_read(&mpp->reset_request)) {
1331 		if (link_dec->task_running || link_dec->task_prepared)
1332 			goto done;
1333 
1334 		disable_irq(mpp->irq);
1335 		rkvdec2_link_reset(mpp);
1336 		link_dec->task_decoded = 0;
1337 		link_dec->task_total = 0;
1338 		enable_irq(mpp->irq);
1339 	}
1340 	/*
1341 	 * process pending queue to find the task to accept.
1342 	 */
1343 	mutex_lock(&queue->pending_lock);
1344 	task = list_first_entry_or_null(&queue->pending_list, struct mpp_task,
1345 					queue_link);
1346 	mutex_unlock(&queue->pending_lock);
1347 	if (!task)
1348 		goto done;
1349 
1350 	if (test_bit(TASK_STATE_ABORT, &task->state)) {
1351 		struct rkvdec2_task *dec_task = to_rkvdec2_task(task);
1352 
1353 		mutex_lock(&queue->pending_lock);
1354 		list_del_init(&task->queue_link);
1355 
1356 		kref_get(&task->ref);
1357 		set_bit(TASK_STATE_ABORT_READY, &task->state);
1358 		set_bit(TASK_STATE_PROC_DONE, &task->state);
1359 
1360 		mutex_unlock(&queue->pending_lock);
1361 		wake_up(&dec_task->wait);
1362 		kref_put(&task->ref, rkvdec2_link_free_task);
1363 		goto again;
1364 	}
1365 
1366 	/*
1367 	 * if target device can accept more task send the task to run.
1368 	 */
1369 	if (link_dec->task_running >= link_dec->task_capacity - 2)
1370 		goto done;
1371 
1372 	if (mpp_task_queue(mpp, task)) {
1373 		/* failed to run */
1374 		mpp_err("%p failed to process task %p:%d\n",
1375 			mpp, task, task->task_id);
1376 	} else {
1377 		mutex_lock(&queue->pending_lock);
1378 		set_bit(TASK_STATE_RUNNING, &task->state);
1379 		list_move_tail(&task->queue_link, &queue->running_list);
1380 		mutex_unlock(&queue->pending_lock);
1381 		goto again;
1382 	}
1383 done:
1384 	mpp_debug_leave();
1385 
1386 	if (link_dec->task_irq != link_dec->task_irq_prev ||
1387 	    atomic_read(&link_dec->task_timeout) != link_dec->task_timeout_prev)
1388 		rkvdec2_link_trigger_work(mpp);
1389 
1390 	/* if no task is left running, power off the device */
1391 	{
1392 		u32 all_done = 0;
1393 
1394 		mutex_lock(&queue->pending_lock);
1395 		all_done = list_empty(&queue->pending_list);
1396 		mutex_unlock(&queue->pending_lock);
1397 
1398 		if (all_done && !link_dec->task_running && !link_dec->task_prepared)
1399 			rkvdec2_link_power_off(mpp);
1400 	}
1401 
1402 	mutex_lock(&queue->session_lock);
1403 	while (queue->detach_count) {
1404 		struct mpp_session *session = NULL;
1405 
1406 		session = list_first_entry_or_null(&queue->session_detach, struct mpp_session,
1407 				session_link);
1408 		if (session) {
1409 			list_del_init(&session->session_link);
1410 			queue->detach_count--;
1411 		}
1412 
1413 		mutex_unlock(&queue->session_lock);
1414 
1415 		if (session) {
1416 			mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev),
1417 					queue->detach_count);
1418 			mpp_session_deinit(session);
1419 		}
1420 
1421 		mutex_lock(&queue->session_lock);
1422 	}
1423 	mutex_unlock(&queue->session_lock);
1424 }
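/*
 * Worker flow: retire finished or timed-out tasks, perform a reset if one
 * was requested and nothing is in flight, then move pending tasks to the
 * hardware while capacity allows. The "task_capacity - 2" guard presumably
 * keeps slots free for the padding node a need_hack task may require. When
 * everything is drained the device is powered off and detached sessions
 * are cleaned up.
 */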
1425 
1426 void rkvdec2_link_session_deinit(struct mpp_session *session)
1427 {
1428 	struct mpp_dev *mpp = session->mpp;
1429 
1430 	mpp_debug_enter();
1431 
1432 	rkvdec2_free_session(session);
1433 
1434 	if (session->dma) {
1435 		mpp_dbg_session("session %d destroy dma\n", session->index);
1436 		mpp_iommu_down_read(mpp->iommu_info);
1437 		mpp_dma_session_destroy(session->dma);
1438 		mpp_iommu_up_read(mpp->iommu_info);
1439 		session->dma = NULL;
1440 	}
1441 	if (session->srv) {
1442 		struct mpp_service *srv = session->srv;
1443 
1444 		mutex_lock(&srv->session_lock);
1445 		list_del_init(&session->service_link);
1446 		mutex_unlock(&srv->session_lock);
1447 	}
1448 	list_del_init(&session->session_link);
1449 
1450 	mpp_dbg_session("session %d release\n", session->index);
1451 
1452 	mpp_debug_leave();
1453 }
1454 
1455 int rkvdec2_attach_ccu(struct device *dev, struct rkvdec2_dev *dec)
1456 {
1457 	int ret;
1458 	struct device_node *np;
1459 	struct platform_device *pdev;
1460 	struct rkvdec2_ccu *ccu;
1461 	struct mpp_taskqueue *queue;
1462 
1463 	mpp_debug_enter();
1464 
1465 	np = of_parse_phandle(dev->of_node, "rockchip,ccu", 0);
1466 	if (!np || !of_device_is_available(np))
1467 		return -ENODEV;
1468 
1469 	pdev = of_find_device_by_node(np);
1470 	of_node_put(np);
1471 	if (!pdev)
1472 		return -ENODEV;
1473 
1474 	ccu = platform_get_drvdata(pdev);
1475 	if (!ccu)
1476 		return -ENOMEM;
1477 
1478 	ret = of_property_read_u32(dev->of_node, "rockchip,core-mask", &dec->core_mask);
1479 	if (ret)
1480 		return ret;
1481 	dev_info(dev, "core_mask=%08x\n", dec->core_mask);
1482 
1483 	/* if not the main-core, then attach the main core domain to current */
1484 	queue = dec->mpp.queue;
1485 	if (&dec->mpp != queue->cores[0]) {
1486 		struct mpp_iommu_info *ccu_info, *cur_info;
1487 
1488 		/* set the ccu-domain for current device */
1489 		ccu_info = queue->cores[0]->iommu_info;
1490 		cur_info = dec->mpp.iommu_info;
1491 		cur_info->domain = ccu_info->domain;
1492 		mpp_iommu_attach(cur_info);
1493 	}
1494 
1495 	dec->ccu = ccu;
1496 
1497 	dev_info(dev, "attach ccu as core %d\n", dec->mpp.core_id);
1498 	mpp_debug_leave();
1499 
1500 	return 0;
1501 }
1502 
1503 static void rkvdec2_ccu_link_timeout_work(struct work_struct *work_s)
1504 {
1505 	struct mpp_dev *mpp;
1506 	struct mpp_session *session;
1507 	struct mpp_task *task = container_of(to_delayed_work(work_s),
1508 					     struct mpp_task, timeout_work);
1509 
1510 	if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
1511 		mpp_err("task %d state %lx has been handled\n",
1512 			task->task_id, task->state);
1513 		return;
1514 	}
1515 
1516 	if (!task->session) {
1517 		mpp_err("task %d session is null.\n", task->task_id);
1518 		return;
1519 	}
1520 	session = task->session;
1521 
1522 	if (!session->mpp) {
1523 		mpp_err("task %d:%d mpp is null.\n", session->index,
1524 			task->task_id);
1525 		return;
1526 	}
1527 	mpp = task->mpp ? task->mpp : session->mpp;
1528 	mpp_err("task timeout\n");
1529 	set_bit(TASK_STATE_TIMEOUT, &task->state);
1530 	atomic_inc(&mpp->reset_request);
1531 	atomic_inc(&mpp->queue->reset_request);
1532 	kthread_queue_work(&mpp->queue->worker, &mpp->work);
1533 }
1534 
1535 int rkvdec2_ccu_link_init(struct platform_device *pdev, struct rkvdec2_dev *dec)
1536 {
1537 	struct resource *res;
1538 	struct rkvdec_link_dev *link_dec;
1539 	struct device *dev = &pdev->dev;
1540 
1541 	mpp_debug_enter();
1542 
1543 	/* link structure */
1544 	link_dec = devm_kzalloc(dev, sizeof(*link_dec), GFP_KERNEL);
1545 	if (!link_dec)
1546 		return -ENOMEM;
1547 
1548 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
1549 	if (!res)
1550 		return -ENOMEM;
1551 
1552 	link_dec->info = &rkvdec_link_v2_hw_info;
1553 	link_dec->reg_base = devm_ioremap(dev, res->start, resource_size(res));
1554 	if (!link_dec->reg_base) {
1555 		dev_err(dev, "ioremap failed for resource %pR\n", res);
1556 		return -ENOMEM;
1557 	}
1558 
1559 	dec->link_dec = link_dec;
1560 
1561 	mpp_debug_leave();
1562 
1563 	return 0;
1564 }
1565 
1566 static int rkvdec2_ccu_link_session_detach(struct mpp_dev *mpp,
1567 					   struct mpp_taskqueue *queue)
1568 {
1569 	mutex_lock(&queue->session_lock);
1570 	while (queue->detach_count) {
1571 		struct mpp_session *session = NULL;
1572 
1573 		session = list_first_entry_or_null(&queue->session_detach,
1574 						   struct mpp_session,
1575 						   session_link);
1576 		if (session) {
1577 			list_del_init(&session->session_link);
1578 			queue->detach_count--;
1579 		}
1580 
1581 		mutex_unlock(&queue->session_lock);
1582 
1583 		if (session) {
1584 			mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev),
1585 					queue->detach_count);
1586 			mpp_session_deinit(session);
1587 		}
1588 
1589 		mutex_lock(&queue->session_lock);
1590 	}
1591 	mutex_unlock(&queue->session_lock);
1592 
1593 	return 0;
1594 }
1595 
1596 static int rkvdec2_ccu_power_on(struct mpp_taskqueue *queue,
1597 				struct rkvdec2_ccu *ccu)
1598 {
1599 	if (!atomic_xchg(&ccu->power_enabled, 1)) {
1600 		u32 i;
1601 		struct mpp_dev *mpp;
1602 
1603 		/* ccu pd and clk on */
1604 		pm_runtime_get_sync(ccu->dev);
1605 		pm_stay_awake(ccu->dev);
1606 		mpp_clk_safe_enable(ccu->aclk_info.clk);
1607 		/* core pd and clk on */
1608 		for (i = 0; i < queue->core_count; i++) {
1609 			mpp = queue->cores[i];
1610 			pm_runtime_get_sync(mpp->dev);
1611 			pm_stay_awake(mpp->dev);
1612 			if (mpp->hw_ops->clk_on)
1613 				mpp->hw_ops->clk_on(mpp);
1614 		}
1615 		mpp_debug(DEBUG_CCU, "power on\n");
1616 	}
1617 
1618 	return 0;
1619 }
1620 
1621 static int rkvdec2_ccu_power_off(struct mpp_taskqueue *queue,
1622 				 struct rkvdec2_ccu *ccu)
1623 {
1624 	if (atomic_xchg(&ccu->power_enabled, 0)) {
1625 		u32 i;
1626 		struct mpp_dev *mpp;
1627 
1628 		/* ccu pd and clk off */
1629 		mpp_clk_safe_disable(ccu->aclk_info.clk);
1630 		pm_relax(ccu->dev);
1631 		pm_runtime_mark_last_busy(ccu->dev);
1632 		pm_runtime_put_autosuspend(ccu->dev);
1633 		/* core pd and clk off */
1634 		for (i = 0; i < queue->core_count; i++) {
1635 			mpp = queue->cores[i];
1636 
1637 			if (mpp->hw_ops->clk_off)
1638 				mpp->hw_ops->clk_off(mpp);
1639 			pm_relax(mpp->dev);
1640 			pm_runtime_mark_last_busy(mpp->dev);
1641 			pm_runtime_put_autosuspend(mpp->dev);
1642 		}
1643 		mpp_debug(DEBUG_CCU, "power off\n");
1644 	}
1645 
1646 	return 0;
1647 }
1648 
1649 static int rkvdec2_soft_ccu_dequeue(struct mpp_taskqueue *queue)
1650 {
1651 	struct mpp_task *mpp_task = NULL, *n;
1652 
1653 	mpp_debug_enter();
1654 
1655 	list_for_each_entry_safe(mpp_task, n,
1656 				 &queue->running_list,
1657 				 queue_link) {
1658 		struct mpp_dev *mpp = mpp_get_task_used_device(mpp_task, mpp_task->session);
1659 		u32 irq_status = mpp->irq_status;
1660 		u32 timeout_flag = test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
1661 		u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
1662 
1663 		if (irq_status || timeout_flag || abort_flag) {
1664 			struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1665 
1666 			set_bit(TASK_STATE_HANDLE, &mpp_task->state);
1667 			cancel_delayed_work(&mpp_task->timeout_work);
1668 			mpp_time_diff(mpp_task);
1669 			task->irq_status = irq_status;
1670 			mpp_debug(DEBUG_IRQ_CHECK, "irq_status=%08x, timeout=%u, abort=%u\n",
1671 				  irq_status, timeout_flag, abort_flag);
1672 			if (irq_status && mpp->dev_ops->finish)
1673 				mpp->dev_ops->finish(mpp, mpp_task);
1674 			else
1675 				task->reg[RKVDEC_REG_INT_EN_INDEX] = RKVDEC_TIMEOUT_STA;
1676 
1677 			set_bit(TASK_STATE_FINISH, &mpp_task->state);
1678 			set_bit(TASK_STATE_DONE, &mpp_task->state);
1679 
1680 			set_bit(mpp->core_id, &queue->core_idle);
1681 			mpp_dbg_core("set core %d idle %lx\n", mpp->core_id, queue->core_idle);
1682 			/* Wake up the GET thread */
1683 			wake_up(&mpp_task->wait);
1684 			/* free task */
1685 			list_del_init(&mpp_task->queue_link);
1686 			kref_put(&mpp_task->ref, mpp_free_task);
1687 		} else {
1688 			/* NOTE: stop at the first unfinished task */
1689 			break;
1690 		}
1691 	}
1692 
1693 	mpp_debug_leave();
1694 	return 0;
1695 }
1696 
1697 static int rkvdec2_soft_ccu_reset(struct mpp_taskqueue *queue,
1698 				  struct rkvdec2_ccu *ccu)
1699 {
1700 	int i;
1701 
1702 	for (i = queue->core_count - 1; i >= 0; i--) {
1703 		u32 val;
1704 
1705 		struct mpp_dev *mpp = queue->cores[i];
1706 		struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1707 
1708 		if (dec->disable_work)
1709 			continue;
1710 
1711 		dev_info(mpp->dev, "resetting...\n");
1712 		disable_hardirq(mpp->irq);
1713 
1714 		/* force idle, disconnect core from ccu */
1715 		writel(dec->core_mask, ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
1716 
1717 		/* soft reset */
1718 		mpp_write(mpp, RKVDEC_REG_IMPORTANT_BASE, RKVDEC_SOFTREST_EN);
1719 		udelay(5);
1720 		val = mpp_read(mpp, RKVDEC_REG_INT_EN);
1721 		if (!(val & RKVDEC_SOFT_RESET_READY))
1722 			mpp_err("soft reset fail, int %08x\n", val);
1723 		mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
1724 
1725 		/* check bus idle */
1726 		val = mpp_read(mpp, RKVDEC_REG_DEBUG_INT_BASE);
1727 		if (!(val & RKVDEC_BIT_BUS_IDLE))
1728 			mpp_err("bus busy\n");
1729 
1730 #if IS_ENABLED(CONFIG_ROCKCHIP_SIP)
1731 		/* sip reset */
1732 		rockchip_dmcfreq_lock();
1733 		sip_smc_vpu_reset(i, 0, 0);
1734 		rockchip_dmcfreq_unlock();
1735 #else
1736 		rkvdec2_reset(mpp);
1737 #endif
1738 		/* clear error mask */
1739 		writel(dec->core_mask & RKVDEC_CCU_CORE_RW_MASK,
1740 		       ccu->reg_base + RKVDEC_CCU_CORE_ERR_BASE);
1741 		/* connect core and ccu */
1742 		writel(dec->core_mask & RKVDEC_CCU_CORE_RW_MASK,
1743 		       ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
1744 		mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
1745 		atomic_set(&mpp->reset_request, 0);
1746 
1747 		enable_irq(mpp->irq);
1748 		dev_info(mpp->dev, "reset done\n");
1749 	}
1750 	atomic_set(&queue->reset_request, 0);
1751 
1752 	return 0;
1753 }
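/*
 * Reset order for the soft-CCU case: each core is forced idle so it is
 * disconnected from the CCU, a soft reset is requested and checked, a SIP
 * reset is issued when available, and finally the error mask is cleared
 * and the core is reconnected before its IOMMU mapping is refreshed.
 */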
1754 
1755 void *rkvdec2_ccu_alloc_task(struct mpp_session *session,
1756 			     struct mpp_task_msgs *msgs)
1757 {
1758 	int ret;
1759 	struct rkvdec2_task *task;
1760 
1761 	task = kzalloc(sizeof(*task), GFP_KERNEL);
1762 	if (!task)
1763 		return NULL;
1764 
1765 	ret = rkvdec2_task_init(session->mpp, session, task, msgs);
1766 	if (ret) {
1767 		kfree(task);
1768 		return NULL;
1769 	}
1770 
1771 	return &task->mpp_task;
1772 }
1773 
1774 int rkvdec2_ccu_iommu_fault_handle(struct iommu_domain *iommu,
1775 				   struct device *iommu_dev,
1776 				   unsigned long iova, int status, void *arg)
1777 {
1778 	u32 i = 0;
1779 	struct mpp_dev *mpp = (struct mpp_dev *)arg;
1780 
1781 	mpp_debug_enter();
1782 
1783 	atomic_inc(&mpp->queue->reset_request);
1784 	for (i = 0; i < mpp->queue->core_count; i++)
1785 		rk_iommu_mask_irq(mpp->queue->cores[i]->dev);
1786 
1787 	kthread_queue_work(&mpp->queue->worker, &mpp->work);
1788 
1789 	mpp_debug_leave();
1790 
1791 	return 0;
1792 }
1793 
1794 irqreturn_t rkvdec2_soft_ccu_irq(int irq, void *param)
1795 {
1796 	struct mpp_dev *mpp = param;
1797 	u32 irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
1798 
1799 	if (irq_status & RKVDEC_IRQ_RAW) {
1800 		mpp_debug(DEBUG_IRQ_STATUS, "irq_status=%08x\n", irq_status);
1801 		if (irq_status & RKVDEC_INT_ERROR_MASK) {
1802 			atomic_inc(&mpp->reset_request);
1803 			atomic_inc(&mpp->queue->reset_request);
1804 		}
1805 		mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
1806 		mpp->irq_status = irq_status;
1807 		kthread_queue_work(&mpp->queue->worker, &mpp->work);
1808 		return IRQ_HANDLED;
1809 	}
1810 	return IRQ_NONE;
1811 }
1812 
1813 static inline int rkvdec2_set_core_info(u32 *reg, int idx)
1814 {
1815 	u32 val = (idx << 16) & RKVDEC_REG_FILM_IDX_MASK;
1816 
1817 	reg[RKVDEC_REG_CORE_CTRL_INDEX] &= ~RKVDEC_REG_FILM_IDX_MASK;
1818 
1819 	reg[RKVDEC_REG_CORE_CTRL_INDEX] |= val;
1820 
1821 	return 0;
1822 }
1823 
1824 static int rkvdec2_soft_ccu_enqueue(struct mpp_dev *mpp, struct mpp_task *mpp_task)
1825 {
1826 	u32 i, reg_en, reg;
1827 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1828 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1829 
1830 	mpp_debug_enter();
1831 
1832 	/* set reg for link */
1833 	reg = RKVDEC_LINK_BIT_CORE_WORK_MODE | RKVDEC_LINK_BIT_CCU_WORK_MODE;
1834 	writel_relaxed(reg, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
1835 
1836 	/* set reg for ccu */
1837 	writel_relaxed(RKVDEC_CCU_BIT_WORK_EN, dec->ccu->reg_base + RKVDEC_CCU_WORK_BASE);
1838 	writel_relaxed(RKVDEC_CCU_BIT_WORK_MODE, dec->ccu->reg_base + RKVDEC_CCU_WORK_MODE_BASE);
1839 	writel_relaxed(dec->core_mask, dec->ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
1840 
1841 	/* set cache size */
1842 	reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS |
1843 		  RKVDEC_CACHE_PERMIT_READ_ALLOCATE;
1844 	if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
1845 		reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;
1846 
1847 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
1848 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
1849 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE2_SIZE_BASE, reg);
1850 	/* clear cache */
1851 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
1852 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
1853 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);
1854 
1855 	mpp_iommu_flush_tlb(mpp->iommu_info);
1856 	/* set registers for hardware */
1857 	reg_en = mpp_task->hw_info->reg_en;
1858 	for (i = 0; i < task->w_req_cnt; i++) {
1859 		int s, e;
1860 		struct mpp_request *req = &task->w_reqs[i];
1861 
1862 		s = req->offset / sizeof(u32);
1863 		e = s + req->size / sizeof(u32);
1864 		mpp_write_req(mpp, task->reg, s, e, reg_en);
1865 	}
1866 	/* init current task */
1867 	mpp->cur_task = mpp_task;
1868 	mpp->irq_status = 0;
1869 	writel_relaxed(dec->core_mask, dec->ccu->reg_base + RKVDEC_CCU_CORE_STA_BASE);
1870 	/* Flush the register before the start the device */
1871 	wmb();
1872 	mpp_write(mpp, RKVDEC_REG_START_EN_BASE, task->reg[reg_en] | RKVDEC_START_EN);
1873 
1874 	mpp_debug_leave();
1875 
1876 	return 0;
1877 }
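/*
 * Enqueue sequence: the link block and the CCU are put into CCU work mode,
 * the caches are sized and invalidated, the task registers are written,
 * and only after a write barrier is the start bit set so the hardware
 * never sees a partially programmed task.
 */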
1878 
1879 static struct mpp_dev *rkvdec2_get_idle_core(struct mpp_taskqueue *queue,
1880 					     struct mpp_task *mpp_task)
1881 {
1882 	u32 i = 0;
1883 	struct rkvdec2_dev *dec = NULL;
1884 
1885 	for (i = 0; i < queue->core_count; i++) {
1886 		struct rkvdec2_dev *core = to_rkvdec2_dev(queue->cores[i]);
1887 
1888 		if (core->disable_work)
1889 			continue;
1890 
1891 		if (test_bit(i, &queue->core_idle)) {
1892 			if (!dec) {
1893 				dec = core;
1894 				continue;
1895 			}
1896 			/* set the less work core */
1897 			if (core->task_index < dec->task_index)
1898 				dec = core;
1899 		}
1900 	}
1901 	/* if get core */
1902 	if (dec) {
1903 		mpp_task->mpp = &dec->mpp;
1904 		mpp_task->core_id = dec->mpp.core_id;
1905 		clear_bit(mpp_task->core_id, &queue->core_idle);
1906 		dec->task_index++;
1907 		mpp_dbg_core("clear core %d idle\n", mpp_task->core_id);
1908 		return mpp_task->mpp;
1909 	}
1910 
1911 	return NULL;
1912 }
1913 
1914 static bool rkvdec2_core_working(struct mpp_taskqueue *queue)
1915 {
1916 	u32 i = 0;
1917 	struct rkvdec2_dev *core;
1918 	bool flag = false;
1919 
1920 	for (i = 0; i < queue->core_count; i++) {
1921 		core = to_rkvdec2_dev(queue->cores[i]);
1922 		if (core->disable_work)
1923 			continue;
1924 		if (!test_bit(i, &queue->core_idle)) {
1925 			flag = true;
1926 			break;
1927 		}
1928 	}
1929 
1930 	return flag;
1931 }
1932 
1933 void rkvdec2_soft_ccu_worker(struct kthread_work *work_s)
1934 {
1935 	struct mpp_task *mpp_task;
1936 	struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
1937 	struct mpp_taskqueue *queue = mpp->queue;
1938 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1939 
1940 	mpp_debug_enter();
1941 
1942 	/* process all finished task in running list */
1943 	rkvdec2_soft_ccu_dequeue(queue);
1944 
1945 	/* process reset request */
1946 	if (atomic_read(&queue->reset_request)) {
1947 		if (rkvdec2_core_working(queue))
1948 			goto out;
1949 		rkvdec2_ccu_power_on(queue, dec->ccu);
1950 		rkvdec2_soft_ccu_reset(queue, dec->ccu);
1951 	}
1952 
1953 get_task:
1954 	/* get one task from the pending list */
1955 	mutex_lock(&queue->pending_lock);
1956 	mpp_task = list_first_entry_or_null(&queue->pending_list,
1957 					    struct mpp_task, queue_link);
1958 	mutex_unlock(&queue->pending_lock);
1959 	if (!mpp_task)
1960 		goto done;
1961 
1962 	if (test_bit(TASK_STATE_ABORT, &mpp_task->state)) {
1963 		mutex_lock(&queue->pending_lock);
1964 		list_del_init(&mpp_task->queue_link);
1965 		mutex_unlock(&queue->pending_lock);
1966 		goto get_task;
1967 	}
1968 	/* find one core is idle */
1969 	mpp = rkvdec2_get_idle_core(queue, mpp_task);
1970 	if (!mpp)
1971 		goto out;
1972 
1973 	/* set session index */
1974 	rkvdec2_set_core_info(mpp_task->reg, mpp_task->session->index);
1975 	/* set rcb buffer */
1976 	mpp_set_rcbbuf(mpp, mpp_task->session, mpp_task);
1977 
1978 	/* pending to running */
1979 	mutex_lock(&queue->pending_lock);
1980 	list_move_tail(&mpp_task->queue_link, &queue->running_list);
1981 	mutex_unlock(&queue->pending_lock);
1982 	set_bit(TASK_STATE_RUNNING, &mpp_task->state);
1983 
1984 	mpp_time_record(mpp_task);
1985 	mpp_debug(DEBUG_TASK_INFO, "pid %d, start hw %s\n",
1986 		  mpp_task->session->pid, dev_name(mpp->dev));
1987 	set_bit(TASK_STATE_START, &mpp_task->state);
1988 	INIT_DELAYED_WORK(&mpp_task->timeout_work, rkvdec2_ccu_link_timeout_work);
1989 	schedule_delayed_work(&mpp_task->timeout_work, msecs_to_jiffies(WORK_TIMEOUT_MS));
1990 	rkvdec2_ccu_power_on(queue, dec->ccu);
1991 	rkvdec2_soft_ccu_enqueue(mpp, mpp_task);
1992 done:
1993 	if (list_empty(&queue->running_list) &&
1994 	    list_empty(&queue->pending_list))
1995 		rkvdec2_ccu_power_off(queue, dec->ccu);
1996 out:
1997 	/* session detach out of queue */
1998 	rkvdec2_ccu_link_session_detach(mpp, queue);
1999 
2000 	mpp_debug_leave();
2001 }
2002