1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3  * Copyright (c) 2021 Rockchip Electronics Co., Ltd
4  *
5  * author:
6  *	Herman Chen <herman.chen@rock-chips.com>
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/interrupt.h>
11 #include <linux/pm_runtime.h>
12 #include <linux/slab.h>
13 #include <soc/rockchip/pm_domains.h>
14 #include <soc/rockchip/rockchip_dmc.h>
15 #include <soc/rockchip/rockchip_iommu.h>
16 
17 #include "mpp_rkvdec2_link.h"
18 
19 #include "hack/mpp_rkvdec2_link_hack_rk3568.c"
20 
21 #define WORK_TIMEOUT_MS		(200)
22 #define WAIT_TIMEOUT_MS		(500)
23 
24 #define RKVDEC_MAX_WRITE_PART	6
25 #define RKVDEC_MAX_READ_PART	2
26 
27 struct rkvdec_link_part {
28 	/* register offset of table buffer */
29 	u32 tb_reg_off;
30 	/* start idx of task register */
31 	u32 reg_start;
32 	/* number of task register */
33 	u32 reg_num;
34 };
35 
36 struct rkvdec_link_info {
37 	dma_addr_t iova;
38 	/* total register for link table buffer */
39 	u32 tb_reg_num;
40 	/* next link table addr in table buffer */
41 	u32 tb_reg_next;
42 	/* current read back addr in table buffer */
43 	u32 tb_reg_r;
44 	/* secondary enable in table buffer */
45 	u32 tb_reg_second_en;
46 	u32 part_w_num;
47 	u32 part_r_num;
48 
49 	struct rkvdec_link_part part_w[RKVDEC_MAX_WRITE_PART];
50 	struct rkvdec_link_part part_r[RKVDEC_MAX_READ_PART];
51 
52 	/* interrupt read back in table buffer */
53 	u32 tb_reg_int;
54 };
55 
56 static struct rkvdec_link_info rkvdec_link_v2_hw_info = {
57 	.tb_reg_num = 218,
58 	.tb_reg_next = 0,
59 	.tb_reg_r = 1,
60 	.tb_reg_second_en = 8,
61 
62 	.part_w_num = 6,
63 	.part_r_num = 2,
64 	.part_w[0] = {
65 		.tb_reg_off = 4,
66 		.reg_start = 8,
67 		.reg_num = 28,
68 	},
69 	.part_w[1] = {
70 		.tb_reg_off = 32,
71 		.reg_start = 64,
72 		.reg_num = 52,
73 	},
74 	.part_w[2] = {
75 		.tb_reg_off = 84,
76 		.reg_start = 128,
77 		.reg_num = 16,
78 	},
79 	.part_w[3] = {
80 		.tb_reg_off = 100,
81 		.reg_start = 160,
82 		.reg_num = 48,
83 	},
84 	.part_w[4] = {
85 		.tb_reg_off = 148,
86 		.reg_start = 224,
87 		.reg_num = 16,
88 	},
89 	.part_w[5] = {
90 		.tb_reg_off = 164,
91 		.reg_start = 256,
92 		.reg_num = 16,
93 	},
94 	.part_r[0] = {
95 		.tb_reg_off = 180,
96 		.reg_start = 224,
97 		.reg_num = 10,
98 	},
99 	.part_r[1] = {
100 		.tb_reg_off = 190,
101 		.reg_start = 258,
102 		.reg_num = 28,
103 	},
104 	.tb_reg_int = 180,
105 };
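/*
 * Annotation (a reading of the tables above, not vendor documentation):
 * each link-table node is tb_reg_num (218) u32 words. Word 0 (tb_reg_next)
 * holds the iova of the next node, word 1 (tb_reg_r) the iova where the
 * hardware writes its read-back registers, word 8 (tb_reg_second_en) the
 * secondary enable bits. part_w[] lists which task registers are copied
 * into the node before decoding, part_r[] which words are copied back to
 * the task afterwards; tb_reg_int is the word carrying the per-task
 * interrupt status.
 */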
106 
107 static void rkvdec_link_status_update(struct rkvdec_link_dev *dev)
108 {
109 	void __iomem *reg_base = dev->reg_base;
110 	u32 error_ff0, error_ff1;
111 	u32 enable_ff0, enable_ff1;
112 	u32 loop_count = 10;
113 	u32 val;
114 
115 	error_ff1 = (readl(reg_base + RKVDEC_LINK_DEC_NUM_BASE) &
116 		    RKVDEC_LINK_BIT_DEC_ERROR) ? 1 : 0;
117 	enable_ff1 = readl(reg_base + RKVDEC_LINK_EN_BASE);
118 
119 	dev->irq_status = readl(reg_base + RKVDEC_LINK_IRQ_BASE);
120 	dev->iova_curr = readl(reg_base + RKVDEC_LINK_CFG_ADDR_BASE);
121 	dev->link_mode = readl(reg_base + RKVDEC_LINK_MODE_BASE);
122 	dev->total = readl(reg_base + RKVDEC_LINK_TOTAL_NUM_BASE);
123 	dev->iova_next = readl(reg_base + RKVDEC_LINK_NEXT_ADDR_BASE);
124 
125 	do {
126 		val = readl(reg_base + RKVDEC_LINK_DEC_NUM_BASE);
127 		error_ff0 = (val & RKVDEC_LINK_BIT_DEC_ERROR) ? 1 : 0;
128 		enable_ff0 = readl(reg_base + RKVDEC_LINK_EN_BASE);
129 
130 		if (error_ff0 == error_ff1 && enable_ff0 == enable_ff1)
131 			break;
132 
133 		error_ff1 = error_ff0;
134 		enable_ff1 = enable_ff0;
135 	} while (--loop_count);
136 
137 	dev->error = error_ff0;
138 	dev->decoded_status = val;
139 	dev->decoded = RKVDEC_LINK_GET_DEC_NUM(val);
140 	dev->enabled = enable_ff0;
141 
142 	if (!loop_count)
143 		dev_info(dev->dev, "link status not stable after 10 reads\n");
144 }
145 
146 static void rkvdec_link_node_dump(const char *func, struct rkvdec_link_dev *dev)
147 {
148 	u32 *table_base = (u32 *)dev->table->vaddr;
149 	u32 reg_count = dev->link_reg_count;
150 	u32 iova = (u32)dev->table->iova;
151 	u32 *reg = NULL;
152 	u32 i, j;
153 
154 	for (i = 0; i < dev->task_size; i++) {
155 		reg = table_base + i * reg_count;
156 
157 		mpp_err("slot %d link config iova %08x:\n", i,
158 			iova + i * dev->link_node_size);
159 
160 		for (j = 0; j < reg_count; j++) {
161 			mpp_err("reg%03d 0x%08x\n", j, reg[j]);
162 			udelay(100);
163 		}
164 	}
165 }
166 
167 static void rkvdec_core_reg_dump(const char *func, struct rkvdec_link_dev *dev)
168 {
169 	struct mpp_dev *mpp = dev->mpp;
170 	u32 s = mpp->var->hw_info->reg_start;
171 	u32 e = mpp->var->hw_info->reg_end;
172 	u32 i;
173 
174 	mpp_err("--- dump hardware register ---\n");
175 
176 	for (i = s; i <= e; i++) {
177 		u32 reg = i * sizeof(u32);
178 
179 		mpp_err("reg[%03d]: %04x: 0x%08x\n",
180 			i, reg, readl_relaxed(mpp->reg_base + reg));
181 		udelay(100);
182 	}
183 }
184 
185 static void rkvdec_link_reg_dump(const char *func, struct rkvdec_link_dev *dev)
186 {
187 	mpp_err("dump link config status from %s\n", func);
188 	mpp_err("reg 0 %08x - irq status\n", dev->irq_status);
189 	mpp_err("reg 1 %08x - cfg addr\n", dev->iova_curr);
190 	mpp_err("reg 2 %08x - link mode\n", dev->link_mode);
191 	mpp_err("reg 4 %08x - decoded num\n", dev->decoded_status);
192 	mpp_err("reg 5 %08x - total num\n", dev->total);
193 	mpp_err("reg 6 %08x - link mode en\n", dev->enabled);
194 	mpp_err("reg 6 %08x - next ltb addr\n", dev->iova_next);
195 }
196 
197 static void rkvdec_link_counter(const char *func, struct rkvdec_link_dev *dev)
198 {
199 	mpp_err("dump link counter from %s\n", func);
200 
201 	mpp_err("task write %d read %d send %d recv %d run %d decoded %d total %d\n",
202 		dev->task_write, dev->task_read, dev->task_send, dev->task_recv,
203 		dev->task_to_run, dev->task_decoded, dev->task_total);
204 }
205 
206 int rkvdec_link_dump(struct mpp_dev *mpp)
207 {
208 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
209 	struct rkvdec_link_dev *dev = dec->link_dec;
210 
211 	rkvdec_link_status_update(dev);
212 	rkvdec_link_reg_dump(__func__, dev);
213 	rkvdec_link_counter(__func__, dev);
214 	rkvdec_core_reg_dump(__func__, dev);
215 	rkvdec_link_node_dump(__func__, dev);
216 
217 	return 0;
218 }
219 
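/*
 * Slot bookkeeping helpers. task_write/read/send/recv are free-running
 * indices in the range [0, 2 * task_size); keeping twice the range lets
 * the code tell a full ring from an empty one. The get_* helpers fold an
 * index back into a slot number in [0, task_size), the inc_* helpers
 * return the current position and then advance the index with wrap-around.
 */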
220 static int rkvdec_link_get_task_write(struct rkvdec_link_dev *dev)
221 {
222 	int idx = dev->task_write < dev->task_size ? dev->task_write :
223 		  dev->task_write - dev->task_size;
224 
225 	return idx;
226 }
227 static int rkvdec_link_inc_task_write(struct rkvdec_link_dev *dev)
228 {
229 	int task_write = rkvdec_link_get_task_write(dev);
230 
231 	dev->task_write++;
232 	if (dev->task_write >= dev->task_size * 2)
233 		dev->task_write = 0;
234 
235 	return task_write;
236 }
237 static int rkvdec_link_get_task_read(struct rkvdec_link_dev *dev)
238 {
239 	int idx = dev->task_read < dev->task_size ? dev->task_read :
240 		  dev->task_read - dev->task_size;
241 
242 	return idx;
243 }
244 static int rkvdec_link_inc_task_read(struct rkvdec_link_dev *dev)
245 {
246 	int task_read = rkvdec_link_get_task_read(dev);
247 
248 	dev->task_read++;
249 	if (dev->task_read >= dev->task_size * 2)
250 		dev->task_read = 0;
251 
252 	return task_read;
253 }
254 static int rkvdec_link_get_task_hw_queue_length(struct rkvdec_link_dev *dev)
255 {
256 	int len;
257 
258 	if (dev->task_send <= dev->task_recv)
259 		len = dev->task_send + dev->task_size - dev->task_recv;
260 	else
261 		len = dev->task_send - dev->task_recv - dev->task_size;
262 
263 	return len;
264 }
265 static int rkvdec_link_get_task_send(struct rkvdec_link_dev *dev)
266 {
267 	int idx = dev->task_send < dev->task_size ? dev->task_send :
268 		  dev->task_send - dev->task_size;
269 
270 	return idx;
271 }
272 static int rkvdec_link_inc_task_send(struct rkvdec_link_dev *dev)
273 {
274 	int task_send = rkvdec_link_get_task_send(dev);
275 
276 	dev->task_send++;
277 	if (dev->task_send >= dev->task_size * 2)
278 		dev->task_send = 0;
279 
280 	return task_send;
281 }
282 static int rkvdec_link_inc_task_recv(struct rkvdec_link_dev *dev)
283 {
284 	int task_recv = dev->task_recv;
285 
286 	dev->task_recv++;
287 	if (dev->task_recv >= dev->task_size * 2)
288 		dev->task_recv = 0;
289 
290 	return task_recv;
291 }
292 
293 static int rkvdec_link_get_next_slot(struct rkvdec_link_dev *dev)
294 {
295 	int next = -1;
296 
297 	if (dev->task_write == dev->task_read)
298 		return next;
299 
300 	next = rkvdec_link_get_task_write(dev);
301 
302 	return next;
303 }
304 
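/*
 * Copy one task into a link-table slot: the part_w[] register ranges are
 * copied from task->reg into the node, the error-handling and wait-reset
 * bits are forced on, and the read-back area described by part_r[] is
 * cleared. On RK3568 (task->need_hack) an extra stuffing node patched by
 * rkvdec2_3568_hack_fix_link() is queued first; it has no mpp_task
 * attached and only shows up in the counters.
 */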
305 static int rkvdec_link_write_task_to_slot(struct rkvdec_link_dev *dev, int idx,
306 					  struct mpp_task *mpp_task)
307 {
308 	u32 i, off, s, n;
309 	struct rkvdec_link_part *part;
310 	struct rkvdec_link_info *info;
311 	struct mpp_dma_buffer *table;
312 	struct rkvdec2_task *task;
313 	int slot_idx;
314 	u32 *tb_reg;
315 
316 	if (idx < 0 || idx >= dev->task_size) {
317 		mpp_err("send invalid task index %d\n", idx);
318 		return -1;
319 	}
320 
321 	info = dev->info;
322 	part = info->part_w;
323 	table = dev->table;
324 	task = to_rkvdec2_task(mpp_task);
325 
326 	slot_idx = rkvdec_link_inc_task_write(dev);
327 	if (idx != slot_idx)
328 		dev_info(dev->dev, "slot index mismatch %d vs %d\n",
329 			 idx, slot_idx);
330 
331 	if (task->need_hack) {
332 		tb_reg = (u32 *)table->vaddr + slot_idx * dev->link_reg_count;
333 
334 		rkvdec2_3568_hack_fix_link(tb_reg + 4);
335 
336 		/* setup error mode flag */
337 		dev->tasks_hw[slot_idx] = NULL;
338 		dev->task_to_run++;
339 		dev->task_prepared++;
340 		slot_idx = rkvdec_link_inc_task_write(dev);
341 	}
342 
343 	tb_reg = (u32 *)table->vaddr + slot_idx * dev->link_reg_count;
344 
345 	for (i = 0; i < info->part_w_num; i++) {
346 		off = part[i].tb_reg_off;
347 		s = part[i].reg_start;
348 		n = part[i].reg_num;
349 		memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
350 	}
351 
352 	/* setup error mode flag */
353 	tb_reg[9] |= BIT(18) | BIT(9);
354 	tb_reg[info->tb_reg_second_en] |= RKVDEC_WAIT_RESET_EN;
355 
356 	/* memset read registers */
357 	part = info->part_r;
358 	for (i = 0; i < info->part_r_num; i++) {
359 		off = part[i].tb_reg_off;
360 		n = part[i].reg_num;
361 		memset(&tb_reg[off], 0, n * sizeof(u32));
362 	}
363 
364 	dev->tasks_hw[slot_idx] = mpp_task;
365 	task->slot_idx = slot_idx;
366 	dev->task_to_run++;
367 	dev->task_prepared++;
368 	mpp_dbg_link_flow("slot %d write task %d\n", slot_idx,
369 			  mpp_task->task_index);
370 
371 	return 0;
372 }
373 
374 static void rkvdec2_clear_cache(struct mpp_dev *mpp)
375 {
376 	/* set cache size */
377 	u32 reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS |
378 		  RKVDEC_CACHE_PERMIT_READ_ALLOCATE;
379 
380 	if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
381 		reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;
382 
383 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
384 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
385 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE2_SIZE_BASE, reg);
386 
387 	/* clear cache */
388 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
389 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
390 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);
391 }
392 
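/*
 * Kick the hardware. A "start" configuration (used when the hardware
 * counters were cleared, or when resending after a reset) reprograms the
 * first node address and re-enables link mode; otherwise the new task
 * count is appended with the ADD_MODE bit so the running chain keeps
 * going. Newly sent tasks also get a 200 ms timeout work scheduled;
 * resends keep the works that are already pending.
 */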
393 static int rkvdec_link_send_task_to_hw(struct rkvdec_link_dev *dev,
394 				       struct mpp_task *mpp_task,
395 				       int slot_idx, u32 task_to_run,
396 				       int resend)
397 {
398 	void __iomem *reg_base = dev->reg_base;
399 	struct mpp_dma_buffer *table = dev->table;
400 	u32 task_total = dev->task_total;
401 	u32 mode_start = 0;
402 	u32 val;
403 
404 	/* write address */
405 	if (!task_to_run || task_to_run > dev->task_size ||
406 	    slot_idx < 0 || slot_idx >= dev->task_size) {
407 		mpp_err("invalid task send cfg at %d count %d\n",
408 			slot_idx, task_to_run);
409 		rkvdec_link_counter("error on send", dev);
410 		return 0;
411 	}
412 
413 	val = task_to_run;
414 	if (!task_total || resend)
415 		mode_start = 1;
416 
417 	if (mode_start) {
418 		u32 iova = table->iova + slot_idx * dev->link_node_size;
419 
420 		rkvdec2_clear_cache(dev->mpp);
421 		/* cleanup counter in hardware */
422 		writel(0, reg_base + RKVDEC_LINK_MODE_BASE);
423 		/* start config before all registers are set */
424 		wmb();
425 		writel(RKVDEC_LINK_BIT_CFG_DONE, reg_base + RKVDEC_LINK_CFG_CTRL_BASE);
426 		/* write zero count config */
427 		wmb();
428 		/* clear counter and enable link mode hardware */
429 		writel(RKVDEC_LINK_BIT_EN, reg_base + RKVDEC_LINK_EN_BASE);
430 
431 		dev->task_total = 0;
432 		dev->task_decoded = 0;
433 
434 		writel_relaxed(iova, reg_base + RKVDEC_LINK_CFG_ADDR_BASE);
435 	} else {
436 		val |= RKVDEC_LINK_BIT_ADD_MODE;
437 	}
438 
439 	if (!resend) {
440 		u32 i;
441 
442 		for (i = 0; i < task_to_run; i++) {
443 			int next_idx = rkvdec_link_inc_task_send(dev);
444 			struct mpp_task *task_ddr = dev->tasks_hw[next_idx];
445 
446 			if (!task_ddr)
447 				continue;
448 
449 			set_bit(TASK_STATE_START, &task_ddr->state);
450 			schedule_delayed_work(&task_ddr->timeout_work,
451 					      msecs_to_jiffies(200));
452 		}
453 	} else {
454 		if (task_total)
455 			dev_info(dev->dev, "resend with total %d\n", task_total);
456 	}
457 
458 	/* set link mode */
459 	writel_relaxed(val, reg_base + RKVDEC_LINK_MODE_BASE);
460 
461 	/* start config before all registers are set */
462 	wmb();
463 
464 	/* configure done */
465 	writel(RKVDEC_LINK_BIT_CFG_DONE, reg_base + RKVDEC_LINK_CFG_CTRL_BASE);
466 
467 	mpp_dbg_link_flow("slot %d enable task %d mode %s\n", slot_idx,
468 			  task_to_run, mode_start ? "start" : "add");
469 	if (mode_start) {
470 		/* start hardware before all registers are set */
471 		wmb();
472 		/* clear counter and enable link mode hardware */
473 		writel(RKVDEC_LINK_BIT_EN, reg_base + RKVDEC_LINK_EN_BASE);
474 	}
475 
476 	dev->task_total += task_to_run;
477 	return 0;
478 }
479 
480 static int rkvdec2_link_finish(struct mpp_dev *mpp, struct mpp_task *mpp_task)
481 {
482 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
483 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
484 	struct rkvdec_link_dev *link_dec = dec->link_dec;
485 	struct mpp_dma_buffer *table = link_dec->table;
486 	struct rkvdec_link_info *info = link_dec->info;
487 	struct rkvdec_link_part *part = info->part_r;
488 	int slot_idx = task->slot_idx;
489 	u32 *tb_reg = (u32 *)(table->vaddr + slot_idx * link_dec->link_node_size);
490 	u32 off, s, n;
491 	u32 i;
492 
493 	mpp_debug_enter();
494 
495 	for (i = 0; i < info->part_r_num; i++) {
496 		off = part[i].tb_reg_off;
497 		s = part[i].reg_start;
498 		n = part[i].reg_num;
499 		memcpy(&task->reg[s], &tb_reg[off], n * sizeof(u32));
500 	}
501 
502 	mpp_debug_leave();
503 
504 	return 0;
505 }
506 
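/*
 * Retire `count` finished slots. Slots without an mpp_task are stuffing
 * nodes from the RK3568 hack: they are only counted, and if the last
 * retired slot is a stuffing node that hit an error, link_dec->decoded is
 * rewound so it gets resent. Real tasks have their read-back registers
 * copied out via rkvdec2_link_finish(), their timeout work cancelled and
 * the waiting ioctl woken up. Hardware cycle counts are averaged over
 * statistic_count tasks when that procfs knob is non-zero.
 */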
507 static int rkvdec_link_isr_recv_task(struct mpp_dev *mpp,
508 				     struct rkvdec_link_dev *link_dec,
509 				     int count)
510 {
511 	struct rkvdec_link_info *info = link_dec->info;
512 	u32 *table_base = (u32 *)link_dec->table->vaddr;
513 	int i;
514 
515 	for (i = 0; i < count; i++) {
516 		int idx = rkvdec_link_get_task_read(link_dec);
517 		struct mpp_task *mpp_task = link_dec->tasks_hw[idx];
518 		struct rkvdec2_task *task = NULL;
519 		u32 *regs = NULL;
520 		u32 irq_status = 0;
521 
522 		if (!mpp_task) {
523 			regs = table_base + idx * link_dec->link_reg_count;
524 			mpp_dbg_link_flow("slot %d read  task stuff\n", idx);
525 
526 			link_dec->stuff_total++;
527 			if (link_dec->statistic_count &&
528 			    regs[RKVDEC_LINK_REG_CYCLE_CNT]) {
529 				link_dec->stuff_cycle_sum +=
530 					regs[RKVDEC_LINK_REG_CYCLE_CNT];
531 				link_dec->stuff_cnt++;
532 				if (link_dec->stuff_cnt >=
533 				    link_dec->statistic_count) {
534 					dev_info(
535 						link_dec->dev, "hw cycle %u\n",
536 						(u32)(link_dec->stuff_cycle_sum /
537 						      link_dec->statistic_count));
538 					link_dec->stuff_cycle_sum = 0;
539 					link_dec->stuff_cnt = 0;
540 				}
541 			}
542 
543 			if (link_dec->error && (i == (count - 1))) {
544 				link_dec->stuff_err++;
545 
546 				irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
547 				dev_info(link_dec->dev, "found stuff task error irq %08x %u/%u\n",
548 					 irq_status, link_dec->stuff_err,
549 					 link_dec->stuff_total);
550 
551 				if (link_dec->stuff_on_error) {
552 					dev_info(link_dec->dev, "stuff task error again %u/%u\n",
553 						 link_dec->stuff_err,
554 						 link_dec->stuff_total);
555 				}
556 
557 				link_dec->stuff_on_error = 1;
558 				/* resend task */
559 				link_dec->decoded--;
560 			} else {
561 				link_dec->stuff_on_error = 0;
562 				rkvdec_link_inc_task_recv(link_dec);
563 				rkvdec_link_inc_task_read(link_dec);
564 				link_dec->task_running--;
565 				link_dec->task_prepared--;
566 			}
567 
568 			continue;
569 		}
570 
571 		task = to_rkvdec2_task(mpp_task);
572 		regs = table_base + idx * link_dec->link_reg_count;
573 		irq_status = regs[info->tb_reg_int];
574 		mpp_dbg_link_flow("slot %d rd task %d\n", idx,
575 				  mpp_task->task_index);
576 
577 		task->irq_status = irq_status;
578 
579 		cancel_delayed_work_sync(&mpp_task->timeout_work);
580 		set_bit(TASK_STATE_HANDLE, &mpp_task->state);
581 
582 		if (link_dec->statistic_count &&
583 		    regs[RKVDEC_LINK_REG_CYCLE_CNT]) {
584 			link_dec->task_cycle_sum +=
585 				regs[RKVDEC_LINK_REG_CYCLE_CNT];
586 			link_dec->task_cnt++;
587 			if (link_dec->task_cnt >= link_dec->statistic_count) {
588 				dev_info(link_dec->dev, "hw cycle %u\n",
589 					 (u32)(link_dec->task_cycle_sum /
590 					       link_dec->statistic_count));
591 				link_dec->task_cycle_sum = 0;
592 				link_dec->task_cnt = 0;
593 			}
594 		}
595 
596 		rkvdec2_link_finish(mpp, mpp_task);
597 
598 		set_bit(TASK_STATE_FINISH, &mpp_task->state);
599 
600 		list_del_init(&mpp_task->queue_link);
601 		link_dec->task_running--;
602 		link_dec->task_prepared--;
603 
604 		rkvdec_link_inc_task_recv(link_dec);
605 		rkvdec_link_inc_task_read(link_dec);
606 
607 		if (test_bit(TASK_STATE_ABORT, &mpp_task->state))
608 			set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
609 
610 		set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
611 		/* Wake up the GET thread */
612 		wake_up(&task->wait);
613 	}
614 
615 	return 0;
616 }
617 
618 static void *rkvdec2_link_prepare(struct mpp_dev *mpp,
619 				  struct mpp_task *mpp_task)
620 {
621 	struct mpp_task *out_task = NULL;
622 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
623 	struct rkvdec_link_dev *link_dec = dec->link_dec;
624 	int ret = 0;
625 	int slot_idx;
626 
627 	mpp_debug_enter();
628 
629 	slot_idx = rkvdec_link_get_next_slot(link_dec);
630 	if (slot_idx < 0) {
631 		mpp_err("capacity %d running %d\n",
632 			mpp->task_capacity, link_dec->task_running);
633 		dev_err(link_dec->dev, "no slot to write on get next slot\n");
634 		goto done;
635 	}
636 
637 	ret = rkvdec_link_write_task_to_slot(link_dec, slot_idx, mpp_task);
638 	if (ret >= 0)
639 		out_task = mpp_task;
640 	else
641 		dev_err(mpp->dev, "no slot to write\n");
642 
643 done:
644 	mpp_debug_leave();
645 
646 	return out_task;
647 }
648 
649 static int rkvdec2_link_reset(struct mpp_dev *mpp)
650 {
651 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
652 
653 	dev_info(mpp->dev, "resetting...\n");
654 
655 	/* FIXME lock resource lock of the other devices in combo */
656 	mpp_iommu_down_write(mpp->iommu_info);
657 	mpp_reset_down_write(mpp->reset_group);
658 	atomic_set(&mpp->reset_request, 0);
659 
660 	rockchip_save_qos(mpp->dev);
661 
662 	mutex_lock(&dec->sip_reset_lock);
663 	rockchip_dmcfreq_lock();
664 	sip_smc_vpu_reset(0, 0, 0);
665 	rockchip_dmcfreq_unlock();
666 	mutex_unlock(&dec->sip_reset_lock);
667 
668 	rockchip_restore_qos(mpp->dev);
669 
670 	/* Note: if the domain does not change, the iommu attach is treated
671 	 * as a no-op. Force a detach and re-attach so the domain is updated
672 	 * and the device is really attached again.
673 	 */
674 	mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
675 
676 	mpp_reset_up_write(mpp->reset_group);
677 	mpp_iommu_up_write(mpp->iommu_info);
678 
679 	dev_info(mpp->dev, "reset done\n");
680 
681 	return 0;
682 }
683 
684 static int rkvdec2_link_irq(struct mpp_dev *mpp)
685 {
686 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
687 	struct rkvdec_link_dev *link_dec = dec->link_dec;
688 	u32 irq_status = 0;
689 
690 	if (!atomic_read(&link_dec->power_enabled)) {
691 		dev_info(link_dec->dev, "irq on power off\n");
692 		return -1;
693 	}
694 
695 	irq_status = readl(link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
696 
697 	if (irq_status & RKVDEC_LINK_BIT_IRQ_RAW) {
698 		u32 enabled = readl(link_dec->reg_base + RKVDEC_LINK_EN_BASE);
699 
700 		if (!enabled) {
701 			u32 bus = mpp_read_relaxed(mpp, 273 * 4);
702 
703 			if (bus & 0x7ffff)
704 				dev_info(link_dec->dev,
705 					 "invalid bus status %08x\n", bus);
706 		}
707 
708 		link_dec->irq_status = irq_status;
709 		mpp->irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
710 
711 		writel_relaxed(0, link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
712 	}
713 
714 	mpp_debug(DEBUG_IRQ_STATUS | DEBUG_LINK_TABLE, "irq_status: %08x : %08x\n",
715 		  irq_status, mpp->irq_status);
716 
717 	return 0;
718 }
719 
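/*
 * Bottom half for link mode. It latches the hardware status, derives how
 * many tasks finished since the last pass (decoded - task_decoded) and
 * retires them; if the link core stopped, a task timed out or a reset was
 * requested, it resets the whole pipeline and resends whatever is still
 * queued in the link table. The irq stays disabled while the counters are
 * read so the snapshot is consistent.
 */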
720 static int rkvdec2_link_isr(struct mpp_dev *mpp)
721 {
722 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
723 	struct rkvdec_link_dev *link_dec = dec->link_dec;
724 	/* keep irq_status */
725 	u32 irq_status = link_dec->irq_status;
726 	u32 prev_dec_num;
727 	int count = 0;
728 	u32 len = 0;
729 	u32 need_reset = atomic_read(&mpp->reset_request);
730 	u32 task_timeout = link_dec->task_on_timeout;
731 
732 	mpp_debug_enter();
733 
734 	disable_irq(mpp->irq);
735 	rkvdec_link_status_update(link_dec);
736 	link_dec->irq_status = irq_status;
737 	prev_dec_num = link_dec->task_decoded;
738 
739 	if (!link_dec->enabled || task_timeout) {
740 		u32 val;
741 
742 		if (task_timeout)
743 			rkvdec_link_reg_dump("timeout", link_dec);
744 
745 		val = mpp_read(mpp, 224 * 4);
746 		if (!(val & BIT(2))) {
747 			dev_info(mpp->dev, "frame not complete\n");
748 			link_dec->decoded++;
749 		}
750 	}
751 	count = (int)link_dec->decoded - (int)prev_dec_num;
752 
753 	/* handle counter wrap */
754 	if (link_dec->enabled && !count && !need_reset) {
755 		/* process extra isr when task is processed */
756 		enable_irq(mpp->irq);
757 		goto done;
758 	}
759 
760 	/* get previous ready task */
761 	if (count) {
762 		rkvdec_link_isr_recv_task(mpp, link_dec, count);
763 		link_dec->task_decoded = link_dec->decoded;
764 	}
765 
766 	if (!link_dec->enabled || need_reset)
767 		goto do_reset;
768 
769 	enable_irq(mpp->irq);
770 	goto done;
771 
772 do_reset:
773 	/* NOTE: irq may run with reset */
774 	atomic_inc(&mpp->reset_request);
775 	rkvdec2_link_reset(mpp);
776 	link_dec->task_decoded = 0;
777 	link_dec->task_total = 0;
778 	enable_irq(mpp->irq);
779 
780 	if (link_dec->total == link_dec->decoded)
781 		goto done;
782 
783 	len = rkvdec_link_get_task_hw_queue_length(link_dec);
784 	if (len > link_dec->task_size)
785 		rkvdec_link_counter("invalid len", link_dec);
786 
787 	if (len) {
788 		int slot_idx = rkvdec_link_get_task_read(link_dec);
789 		struct mpp_task *mpp_task = NULL;
790 
791 		mpp_task = link_dec->tasks_hw[slot_idx];
792 		rkvdec_link_send_task_to_hw(link_dec, mpp_task,
793 					    slot_idx, len, 1);
794 	}
795 
796 done:
797 	mpp_debug_leave();
798 
799 	return IRQ_HANDLED;
800 }
801 
802 int rkvdec2_link_remove(struct mpp_dev *mpp, struct rkvdec_link_dev *link_dec)
803 {
804 	mpp_debug_enter();
805 
806 	if (link_dec && link_dec->table) {
807 		mpp_dma_free(link_dec->table);
808 		link_dec->table = NULL;
809 	}
810 
811 	mpp_debug_leave();
812 
813 	return 0;
814 }
815 
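/*
 * The link table is one DMA buffer holding task_capacity nodes. Each node
 * is padded to a 256-byte aligned size and pre-linked to the next one,
 * with the last node pointing back to the first, so the hardware can walk
 * the ring without further driver writes. The read/recv indices start at
 * task_size (see the helpers above) to encode an initially empty ring.
 */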
816 static int rkvdec2_link_alloc_table(struct mpp_dev *mpp,
817 				    struct rkvdec_link_dev *link_dec)
818 {
819 	int ret;
820 	struct mpp_dma_buffer *table;
821 	struct rkvdec_link_info *info = link_dec->info;
822 	/* NOTE: link table addresses require 64-byte alignment */
823 	u32 task_capacity = link_dec->task_capacity;
824 	u32 link_node_size = ALIGN(info->tb_reg_num * sizeof(u32), 256);
825 	u32 link_info_size = task_capacity * link_node_size;
826 	u32 *v_curr;
827 	u32 io_curr, io_next, io_start;
828 	u32 offset_r = info->part_r[0].tb_reg_off * sizeof(u32);
829 	u32 i;
830 
831 	table = mpp_dma_alloc(mpp->dev, link_info_size);
832 	if (!table) {
833 		ret = -ENOMEM;
834 		goto err_free_node;
835 	}
836 
837 	link_dec->link_node_size = link_node_size;
838 	link_dec->link_reg_count = link_node_size >> 2;
839 	io_start = table->iova;
840 
841 	for (i = 0; i < task_capacity; i++) {
842 		v_curr  = (u32 *)(table->vaddr + i * link_node_size);
843 		io_curr = io_start + i * link_node_size;
844 		io_next = (i == task_capacity - 1) ?
845 			  io_start : io_start + (i + 1) * link_node_size;
846 
847 		v_curr[info->tb_reg_next] = io_next;
848 		v_curr[info->tb_reg_r] = io_curr + offset_r;
849 	}
850 
851 	link_dec->table	     = table;
852 	link_dec->task_size  = task_capacity;
853 	link_dec->task_count = 0;
854 	link_dec->task_write = 0;
855 	link_dec->task_read  = link_dec->task_size;
856 	link_dec->task_send  = 0;
857 	link_dec->task_recv  = link_dec->task_size;
858 
859 	return 0;
860 err_free_node:
861 	rkvdec2_link_remove(mpp, link_dec);
862 	return ret;
863 }
864 
865 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
866 int rkvdec2_link_procfs_init(struct mpp_dev *mpp)
867 {
868 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
869 	struct rkvdec_link_dev *link_dec = dec->link_dec;
870 
871 	if (!link_dec)
872 		return 0;
873 
874 	link_dec->statistic_count = 0;
875 
876 	if (dec->procfs)
877 		mpp_procfs_create_u32("statistic_count", 0644,
878 				      dec->procfs, &link_dec->statistic_count);
879 
880 	return 0;
881 }
882 #else
883 int rkvdec2_link_procfs_init(struct mpp_dev *mpp)
884 {
885 	return 0;
886 }
887 #endif
888 
889 int rkvdec2_link_init(struct platform_device *pdev, struct rkvdec2_dev *dec)
890 {
891 	int ret;
892 	struct resource *res = NULL;
893 	struct rkvdec_link_dev *link_dec = NULL;
894 	struct device *dev = &pdev->dev;
895 	struct mpp_dev *mpp = &dec->mpp;
896 
897 	mpp_debug_enter();
898 
899 	link_dec = devm_kzalloc(dev, sizeof(*link_dec), GFP_KERNEL);
900 	if (!link_dec) {
901 		ret = -ENOMEM;
902 		goto done;
903 	}
904 
905 	link_dec->tasks_hw = devm_kzalloc(dev, sizeof(*link_dec->tasks_hw) *
906 					  mpp->task_capacity, GFP_KERNEL);
907 	if (!link_dec->tasks_hw) {
908 		ret = -ENOMEM;
909 		goto done;
910 	}
911 
912 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
913 	if (res)
914 		link_dec->info = &rkvdec_link_v2_hw_info;
915 	else {
916 		dev_err(dev, "link mode resource not found\n");
917 		ret = -ENOMEM;
918 		goto done;
919 	}
920 
921 	link_dec->reg_base = devm_ioremap(dev, res->start, resource_size(res));
922 	if (!link_dec->reg_base) {
923 		dev_err(dev, "ioremap failed for resource %pR\n", res);
924 		ret = -ENOMEM;
925 		goto done;
926 	}
927 
928 	link_dec->task_capacity = mpp->task_capacity;
929 	ret = rkvdec2_link_alloc_table(&dec->mpp, link_dec);
930 	if (ret)
931 		goto done;
932 
933 	link_dec->mpp = mpp;
934 	link_dec->dev = dev;
935 	atomic_set(&link_dec->task_timeout, 0);
936 	atomic_set(&link_dec->power_enabled, 0);
937 	link_dec->irq_enabled = 1;
938 
939 	dec->link_dec = link_dec;
940 	dev_info(dev, "link mode probe finish\n");
941 
942 done:
943 	if (ret) {
944 		if (link_dec) {
945 			if (link_dec->reg_base) {
946 				devm_iounmap(dev, link_dec->reg_base);
947 				link_dec->reg_base = NULL;
948 			}
949 			if (link_dec->tasks_hw) {
950 				devm_kfree(dev, link_dec->tasks_hw);
951 				link_dec->tasks_hw = NULL;
952 			}
953 
954 			devm_kfree(dev, link_dec);
955 			link_dec = NULL;
956 		}
957 		dec->link_dec = NULL;
958 	}
959 	mpp_debug_leave();
960 
961 	return ret;
962 }
963 
964 static void rkvdec2_link_free_task(struct kref *ref)
965 {
966 	struct mpp_dev *mpp;
967 	struct mpp_session *session;
968 	struct mpp_task *task = container_of(ref, struct mpp_task, ref);
969 
970 	if (!task->session) {
971 		mpp_err("task %d task->session is null.\n", task->task_index);
972 		return;
973 	}
974 	session = task->session;
975 
976 	mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d state 0x%lx\n",
977 		       session->index, task->task_index, task->state);
978 	if (!session->mpp) {
979 		mpp_err("session %d session->mpp is null.\n", session->index);
980 		return;
981 	}
982 	mpp = session->mpp;
983 	list_del_init(&task->queue_link);
984 
985 	rkvdec2_free_task(session, task);
986 	/* Decrease reference count */
987 	atomic_dec(&session->task_count);
988 	atomic_dec(&mpp->task_count);
989 }
990 
991 static void rkvdec2_link_trigger_work(struct mpp_dev *mpp)
992 {
993 	kthread_queue_work(&mpp->queue->worker, &mpp->work);
994 }
995 
996 static void rkvdec2_link_trigger_timeout(struct mpp_dev *mpp)
997 {
998 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
999 	struct rkvdec_link_dev *link_dec = dec->link_dec;
1000 
1001 	atomic_inc(&link_dec->task_timeout);
1002 	rkvdec2_link_trigger_work(mpp);
1003 }
1004 
1005 static void rkvdec2_link_trigger_irq(struct mpp_dev *mpp)
1006 {
1007 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1008 	struct rkvdec_link_dev *link_dec = dec->link_dec;
1009 
1010 	link_dec->task_irq++;
1011 	rkvdec2_link_trigger_work(mpp);
1012 }
1013 
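/*
 * Power handling uses a single power_enabled flag rather than refcounting:
 * it gates runtime PM, clocks and the irq line, so the first queued task
 * powers the decoder up and the worker powers it down again once the
 * pending list is empty and nothing is running or prepared.
 */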
1014 static int rkvdec2_link_power_on(struct mpp_dev *mpp)
1015 {
1016 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1017 	struct rkvdec_link_dev *link_dec = dec->link_dec;
1018 
1019 	if (!atomic_xchg(&link_dec->power_enabled, 1)) {
1020 		if (mpp_iommu_attach(mpp->iommu_info)) {
1021 			dev_err(mpp->dev, "mpp_iommu_attach failed\n");
1022 			return -ENODATA;
1023 		}
1024 		pm_runtime_get_sync(mpp->dev);
1025 		pm_stay_awake(mpp->dev);
1026 
1027 		if (mpp->hw_ops->clk_on)
1028 			mpp->hw_ops->clk_on(mpp);
1029 
1030 		if (!link_dec->irq_enabled) {
1031 			enable_irq(mpp->irq);
1032 			link_dec->irq_enabled = 1;
1033 		}
1034 	}
1035 	return 0;
1036 }
1037 
1038 static void rkvdec2_link_power_off(struct mpp_dev *mpp)
1039 {
1040 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1041 	struct rkvdec_link_dev *link_dec = dec->link_dec;
1042 
1043 	if (atomic_xchg(&link_dec->power_enabled, 0)) {
1044 		disable_irq(mpp->irq);
1045 		link_dec->irq_enabled = 0;
1046 
1047 		if (mpp->hw_ops->clk_off)
1048 			mpp->hw_ops->clk_off(mpp);
1049 
1050 		pm_relax(mpp->dev);
1051 		pm_runtime_put_sync_suspend(mpp->dev);
1052 
1053 		link_dec->task_decoded = 0;
1054 		link_dec->task_total = 0;
1055 	}
1056 }
1057 
1058 static void rkvdec2_link_timeout_proc(struct work_struct *work_s)
1059 {
1060 	struct mpp_dev *mpp;
1061 	struct mpp_session *session;
1062 	struct mpp_task *task = container_of(to_delayed_work(work_s),
1063 					     struct mpp_task, timeout_work);
1064 
1065 	if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
1066 		mpp_err("task %d state %lx has been handled\n",
1067 			task->task_index, task->state);
1068 		return;
1069 	}
1070 
1071 	if (!task->session) {
1072 		mpp_err("task %d session is null.\n", task->task_index);
1073 		return;
1074 	}
1075 	session = task->session;
1076 
1077 	if (!session->mpp) {
1078 		mpp_err("task %d:%d mpp is null.\n", session->index,
1079 			task->task_index);
1080 		return;
1081 	}
1082 	mpp = session->mpp;
1083 	set_bit(TASK_STATE_TIMEOUT, &task->state);
1084 	rkvdec2_link_trigger_timeout(mpp);
1085 }
1086 
1087 static void mpp_taskqueue_scan_pending_abort_task(struct mpp_taskqueue *queue)
1088 {
1089 	struct mpp_task *task, *n;
1090 
1091 	mutex_lock(&queue->pending_lock);
1092 	/* Check and pop all aborted tasks */
1093 	list_for_each_entry_safe(task, n, &queue->pending_list, queue_link) {
1094 		struct mpp_session *session = task->session;
1095 
1096 		if (test_bit(TASK_STATE_ABORT, &task->state)) {
1097 			mutex_lock(&session->pending_lock);
1098 			/* wait and signal */
1099 			list_del_init(&task->queue_link);
1100 			mutex_unlock(&session->pending_lock);
1101 			kref_put(&task->ref, rkvdec2_link_free_task);
1102 		}
1103 	}
1104 	mutex_unlock(&queue->pending_lock);
1105 }
1106 
1107 static void rkvdec2_link_try_dequeue(struct mpp_dev *mpp)
1108 {
1109 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1110 	struct rkvdec_link_dev *link_dec = dec->link_dec;
1111 	struct mpp_task *task;
1112 	struct mpp_taskqueue *queue = mpp->queue;
1113 	int task_irq = link_dec->task_irq;
1114 	int task_irq_prev = link_dec->task_irq_prev;
1115 	int task_timeout = atomic_read(&link_dec->task_timeout);
1116 
1117 	if (!link_dec->task_running)
1118 		goto done;
1119 
1120 	if (task_timeout != link_dec->task_timeout_prev) {
1121 		dev_info(link_dec->dev, "process task timeout\n");
1122 		atomic_inc(&mpp->reset_request);
1123 		link_dec->task_on_timeout =
1124 			task_timeout - link_dec->task_timeout_prev;
1125 		goto proc;
1126 	}
1127 
1128 	if (task_irq == task_irq_prev)
1129 		goto done;
1130 
1131 	if (!atomic_read(&link_dec->power_enabled)) {
1132 		dev_info(link_dec->dev, "dequeue on power off\n");
1133 		goto done;
1134 	}
1135 
1136 proc:
1137 	task = list_first_entry_or_null(&queue->running_list, struct mpp_task,
1138 					queue_link);
1139 	if (!task) {
1140 		mpp_err("can found task on trydequeue with %d running task\n",
1141 			link_dec->task_running);
1142 		goto done;
1143 	}
1144 
1145 	/* Check and process all finished task */
1146 	rkvdec2_link_isr(mpp);
1147 
1148 done:
1149 	link_dec->task_irq_prev = task_irq;
1150 	link_dec->task_timeout_prev = task_timeout;
1151 	link_dec->task_on_timeout = 0;
1152 
1153 	mpp_taskqueue_scan_pending_abort_task(queue);
1154 
1155 	/* TODO: if reset is needed do reset here */
1156 }
1157 
1158 static int mpp_task_queue(struct mpp_dev *mpp, struct mpp_task *task)
1159 {
1160 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1161 	struct rkvdec_link_dev *link_dec = dec->link_dec;
1162 	u32 task_to_run = 0;
1163 	int slot_idx = 0;
1164 
1165 	mpp_debug_enter();
1166 
1167 	rkvdec2_link_power_on(mpp);
1168 	mpp_time_record(task);
1169 	mpp_debug(DEBUG_TASK_INFO, "pid %d, start hw %s\n",
1170 		  task->session->pid, dev_name(mpp->dev));
1171 
1172 	/* prepare the task for running */
1173 	if (test_and_set_bit(TASK_STATE_PREPARE, &task->state))
1174 		mpp_err("task %d has been prepare twice\n", task->task_index);
1175 
1176 	rkvdec2_link_prepare(mpp, task);
1177 
1178 	task_to_run = link_dec->task_to_run;
1179 	if (!task_to_run) {
1180 		dev_err(link_dec->dev, "nothing to run\n");
1181 		goto done;
1182 	}
1183 
1184 	mpp_reset_down_read(mpp->reset_group);
1185 	link_dec->task_to_run = 0;
1186 	slot_idx = rkvdec_link_get_task_send(link_dec);
1187 	link_dec->task_running += task_to_run;
1188 	rkvdec_link_send_task_to_hw(link_dec, task, slot_idx, task_to_run, 0);
1189 
1190 done:
1191 	mpp_debug_leave();
1192 
1193 	return 0;
1194 }
1195 
1196 irqreturn_t rkvdec2_link_irq_proc(int irq, void *param)
1197 {
1198 	struct mpp_dev *mpp = param;
1199 	int ret = rkvdec2_link_irq(mpp);
1200 
1201 	if (!ret)
1202 		rkvdec2_link_trigger_irq(mpp);
1203 
1204 	return IRQ_HANDLED;
1205 }
1206 
1207 static struct mpp_task *
1208 mpp_session_get_pending_task(struct mpp_session *session)
1209 {
1210 	struct mpp_task *task = NULL;
1211 
1212 	mutex_lock(&session->pending_lock);
1213 	task = list_first_entry_or_null(&session->pending_list, struct mpp_task,
1214 					pending_link);
1215 	mutex_unlock(&session->pending_lock);
1216 
1217 	return task;
1218 }
1219 
1220 static int task_is_done(struct mpp_task *task)
1221 {
1222 	return test_bit(TASK_STATE_PROC_DONE, &task->state);
1223 }
1224 
1225 static int mpp_session_pop_pending(struct mpp_session *session,
1226 				   struct mpp_task *task)
1227 {
1228 	mutex_lock(&session->pending_lock);
1229 	list_del_init(&task->pending_link);
1230 	mutex_unlock(&session->pending_lock);
1231 	kref_put(&task->ref, rkvdec2_link_free_task);
1232 
1233 	return 0;
1234 }
1235 
1236 static int mpp_session_pop_done(struct mpp_session *session,
1237 				struct mpp_task *task)
1238 {
1239 	set_bit(TASK_STATE_DONE, &task->state);
1240 	kref_put(&task->ref, rkvdec2_link_free_task);
1241 
1242 	return 0;
1243 }
1244 
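/*
 * Userspace entry points. rkvdec2_link_process_task() allocates the task,
 * takes references for the session pending list and the queue pending
 * list, and kicks the worker. rkvdec2_link_wait_result() then sleeps for
 * up to WAIT_TIMEOUT_MS waiting for TASK_STATE_PROC_DONE and either
 * returns the result or marks the task aborted so the worker can drop it
 * later.
 */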
1245 int rkvdec2_link_process_task(struct mpp_session *session,
1246 			      struct mpp_task_msgs *msgs)
1247 {
1248 	struct mpp_task *task = NULL;
1249 	struct mpp_dev *mpp = session->mpp;
1250 
1251 	task = rkvdec2_alloc_task(session, msgs);
1252 	if (!task) {
1253 		mpp_err("alloc_task failed.\n");
1254 		return -ENOMEM;
1255 	}
1256 
1257 	kref_init(&task->ref);
1258 	atomic_set(&task->abort_request, 0);
1259 	task->task_index = atomic_fetch_inc(&mpp->task_index);
1260 	INIT_DELAYED_WORK(&task->timeout_work, rkvdec2_link_timeout_proc);
1261 
1262 	atomic_inc(&session->task_count);
1263 
1264 	kref_get(&task->ref);
1265 	mutex_lock(&session->pending_lock);
1266 	list_add_tail(&task->pending_link, &session->pending_list);
1267 	mutex_unlock(&session->pending_lock);
1268 
1269 	kref_get(&task->ref);
1270 	mutex_lock(&mpp->queue->pending_lock);
1271 	list_add_tail(&task->queue_link, &mpp->queue->pending_list);
1272 	mutex_unlock(&mpp->queue->pending_lock);
1273 
1274 	/* push current task to queue */
1275 	atomic_inc(&mpp->task_count);
1276 	set_bit(TASK_STATE_PENDING, &task->state);
1277 	/* trigger current queue to run task */
1278 	rkvdec2_link_trigger_work(mpp);
1279 	kref_put(&task->ref, rkvdec2_link_free_task);
1280 
1281 	return 0;
1282 }
1283 
1284 int rkvdec2_link_wait_result(struct mpp_session *session,
1285 			     struct mpp_task_msgs *msgs)
1286 {
1287 	struct mpp_dev *mpp = session->mpp;
1288 	struct mpp_task *mpp_task;
1289 	struct rkvdec2_task *task;
1290 	int ret;
1291 
1292 	mpp_task = mpp_session_get_pending_task(session);
1293 	if (!mpp_task) {
1294 		mpp_err("session %p pending list is empty!\n", session);
1295 		return -EIO;
1296 	}
1297 
1298 	task = to_rkvdec2_task(mpp_task);
1299 	ret = wait_event_timeout(task->wait, task_is_done(mpp_task),
1300 				 msecs_to_jiffies(WAIT_TIMEOUT_MS));
1301 	if (ret) {
1302 		ret = rkvdec2_result(mpp, mpp_task, msgs);
1303 
1304 		mpp_session_pop_done(session, mpp_task);
1305 	} else {
1306 		mpp_err("task %d:%d statue %lx timeout -> abort\n",
1307 			session->index, mpp_task->task_index, mpp_task->state);
1308 
1309 		atomic_inc(&mpp_task->abort_request);
1310 		set_bit(TASK_STATE_ABORT, &mpp_task->state);
1311 	}
1312 
1313 	mpp_session_pop_pending(session, mpp_task);
1314 	return ret;
1315 }
1316 
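/*
 * Main scheduling loop for single-core link mode: retire finished or
 * timed-out work, perform a pending reset only when nothing is in flight,
 * then pull tasks from the queue pending list and batch them into the
 * link table until the hardware is close to capacity (task_capacity - 2).
 * Once everything drains, the device is powered off and detached sessions
 * are cleaned up.
 */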
1317 void rkvdec2_link_worker(struct kthread_work *work_s)
1318 {
1319 	struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
1320 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1321 	struct rkvdec_link_dev *link_dec = dec->link_dec;
1322 	struct mpp_task *task;
1323 	struct mpp_taskqueue *queue = mpp->queue;
1324 
1325 	mpp_debug_enter();
1326 
1327 	/*
1328 	 * process timed-out and finished tasks.
1329 	 */
1330 	rkvdec2_link_try_dequeue(mpp);
1331 
1332 again:
1333 	if (atomic_read(&mpp->reset_request)) {
1334 		if (link_dec->task_running || link_dec->task_prepared)
1335 			goto done;
1336 
1337 		disable_irq(mpp->irq);
1338 		rkvdec2_link_reset(mpp);
1339 		link_dec->task_decoded = 0;
1340 		link_dec->task_total = 0;
1341 		enable_irq(mpp->irq);
1342 	}
1343 	/*
1344 	 * process pending queue to find the task to accept.
1345 	 */
1346 	mutex_lock(&queue->pending_lock);
1347 	task = list_first_entry_or_null(&queue->pending_list, struct mpp_task,
1348 					queue_link);
1349 	mutex_unlock(&queue->pending_lock);
1350 	if (!task)
1351 		goto done;
1352 
1353 	if (test_bit(TASK_STATE_ABORT, &task->state)) {
1354 		struct rkvdec2_task *dec_task = to_rkvdec2_task(task);
1355 
1356 		mutex_lock(&queue->pending_lock);
1357 		list_del_init(&task->queue_link);
1358 
1359 		kref_get(&task->ref);
1360 		set_bit(TASK_STATE_ABORT_READY, &task->state);
1361 		set_bit(TASK_STATE_PROC_DONE, &task->state);
1362 
1363 		mutex_unlock(&queue->pending_lock);
1364 		wake_up(&dec_task->wait);
1365 		kref_put(&task->ref, rkvdec2_link_free_task);
1366 		goto again;
1367 	}
1368 
1369 	/*
1370 	 * if the target device can accept more tasks, send the task to run.
1371 	 */
1372 	if (link_dec->task_running >= link_dec->task_capacity - 2)
1373 		goto done;
1374 
1375 	if (mpp_task_queue(mpp, task)) {
1376 		/* failed to run */
1377 		mpp_err("%p failed to process task %p:%d\n",
1378 			mpp, task, task->task_index);
1379 	} else {
1380 		mutex_lock(&queue->pending_lock);
1381 		set_bit(TASK_STATE_RUNNING, &task->state);
1382 		list_move_tail(&task->queue_link, &queue->running_list);
1383 		mutex_unlock(&queue->pending_lock);
1384 		goto again;
1385 	}
1386 done:
1387 	mpp_debug_leave();
1388 
1389 	if (link_dec->task_irq != link_dec->task_irq_prev ||
1390 	    atomic_read(&link_dec->task_timeout) != link_dec->task_timeout_prev)
1391 		rkvdec2_link_trigger_work(mpp);
1392 
1393 	/* if no task is running, power off the device */
1394 	{
1395 		u32 all_done = 0;
1396 
1397 		mutex_lock(&queue->pending_lock);
1398 		all_done = list_empty(&queue->pending_list);
1399 		mutex_unlock(&queue->pending_lock);
1400 
1401 		if (all_done && !link_dec->task_running && !link_dec->task_prepared)
1402 			rkvdec2_link_power_off(mpp);
1403 	}
1404 
1405 	mutex_lock(&queue->session_lock);
1406 	while (queue->detach_count) {
1407 		struct mpp_session *session = NULL;
1408 
1409 		session = list_first_entry_or_null(&queue->session_detach, struct mpp_session,
1410 				session_link);
1411 		if (session) {
1412 			list_del_init(&session->session_link);
1413 			queue->detach_count--;
1414 		}
1415 
1416 		mutex_unlock(&queue->session_lock);
1417 
1418 		if (session) {
1419 			mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev),
1420 					queue->detach_count);
1421 			mpp_session_deinit(session);
1422 		}
1423 
1424 		mutex_lock(&queue->session_lock);
1425 	}
1426 	mutex_unlock(&queue->session_lock);
1427 }
1428 
1429 void rkvdec2_link_session_deinit(struct mpp_session *session)
1430 {
1431 	struct mpp_dev *mpp = session->mpp;
1432 
1433 	mpp_debug_enter();
1434 
1435 	rkvdec2_free_session(session);
1436 
1437 	if (session->dma) {
1438 		mpp_dbg_session("session %d destroy dma\n", session->index);
1439 		mpp_iommu_down_read(mpp->iommu_info);
1440 		mpp_dma_session_destroy(session->dma);
1441 		mpp_iommu_up_read(mpp->iommu_info);
1442 		session->dma = NULL;
1443 	}
1444 	if (session->srv) {
1445 		struct mpp_service *srv = session->srv;
1446 
1447 		mutex_lock(&srv->session_lock);
1448 		list_del_init(&session->service_link);
1449 		mutex_unlock(&srv->session_lock);
1450 	}
1451 	list_del_init(&session->session_link);
1452 
1453 	mpp_dbg_session("session %d release\n", session->index);
1454 
1455 	mpp_debug_leave();
1456 }
1457 
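/*
 * Multi-core (CCU) setup: every core looks up the shared CCU device via
 * the "rockchip,ccu" phandle and reads its own "rockchip,core-mask".
 * Cores other than cores[0] reuse the main core's iommu domain, so all
 * cores resolve the same iova mappings.
 */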
1458 int rkvdec2_attach_ccu(struct device *dev, struct rkvdec2_dev *dec)
1459 {
1460 	int ret;
1461 	struct device_node *np;
1462 	struct platform_device *pdev;
1463 	struct rkvdec2_ccu *ccu;
1464 	struct mpp_taskqueue *queue;
1465 
1466 	mpp_debug_enter();
1467 
1468 	np = of_parse_phandle(dev->of_node, "rockchip,ccu", 0);
1469 	if (!np || !of_device_is_available(np))
1470 		return -ENODEV;
1471 
1472 	pdev = of_find_device_by_node(np);
1473 	of_node_put(np);
1474 	if (!pdev)
1475 		return -ENODEV;
1476 
1477 	ccu = platform_get_drvdata(pdev);
1478 	if (!ccu)
1479 		return -ENOMEM;
1480 
1481 	ret = of_property_read_u32(dev->of_node, "rockchip,core-mask", &dec->core_mask);
1482 	if (ret)
1483 		return ret;
1484 	dev_info(dev, "core_mask=%08x\n", dec->core_mask);
1485 
1486 	/* if not the main core, attach the main core's domain to this device */
1487 	queue = dec->mpp.queue;
1488 	if (&dec->mpp != queue->cores[0]) {
1489 		struct mpp_iommu_info *ccu_info, *cur_info;
1490 
1491 		/* set the ccu-domain for current device */
1492 		ccu_info = queue->cores[0]->iommu_info;
1493 		cur_info = dec->mpp.iommu_info;
1494 		cur_info->domain = ccu_info->domain;
1495 		mpp_iommu_attach(cur_info);
1496 	}
1497 
1498 	dec->ccu = ccu;
1499 
1500 	dev_info(dev, "attach ccu as core %d\n", dec->mpp.core_id);
1501 	mpp_debug_leave();
1502 
1503 	return 0;
1504 }
1505 
1506 static void rkvdec2_ccu_link_timeout_work(struct work_struct *work_s)
1507 {
1508 	struct mpp_dev *mpp;
1509 	struct mpp_session *session;
1510 	struct mpp_task *task = container_of(to_delayed_work(work_s),
1511 					     struct mpp_task, timeout_work);
1512 
1513 	if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
1514 		mpp_err("task %d state %lx has been handled\n",
1515 			task->task_index, task->state);
1516 		return;
1517 	}
1518 
1519 	if (!task->session) {
1520 		mpp_err("task %d session is null.\n", task->task_index);
1521 		return;
1522 	}
1523 	session = task->session;
1524 
1525 	if (!session->mpp) {
1526 		mpp_err("task %d:%d mpp is null.\n", session->index,
1527 			task->task_index);
1528 		return;
1529 	}
1530 	mpp = task->mpp ? task->mpp : session->mpp;
1531 	mpp_err("task timeout\n");
1532 	set_bit(TASK_STATE_TIMEOUT, &task->state);
1533 	atomic_inc(&mpp->reset_request);
1534 	atomic_inc(&mpp->queue->reset_request);
1535 	kthread_queue_work(&mpp->queue->worker, &mpp->work);
1536 }
1537 
1538 int rkvdec2_ccu_link_init(struct platform_device *pdev, struct rkvdec2_dev *dec)
1539 {
1540 	struct resource *res;
1541 	struct rkvdec_link_dev *link_dec;
1542 	struct device *dev = &pdev->dev;
1543 
1544 	mpp_debug_enter();
1545 
1546 	/* link structure */
1547 	link_dec = devm_kzalloc(dev, sizeof(*link_dec), GFP_KERNEL);
1548 	if (!link_dec)
1549 		return -ENOMEM;
1550 
1551 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
1552 	if (!res)
1553 		return -ENOMEM;
1554 
1555 	link_dec->info = &rkvdec_link_v2_hw_info;
1556 	link_dec->reg_base = devm_ioremap(dev, res->start, resource_size(res));
1557 	if (!link_dec->reg_base) {
1558 		dev_err(dev, "ioremap failed for resource %pR\n", res);
1559 		return -ENOMEM;
1560 	}
1561 
1562 	dec->link_dec = link_dec;
1563 
1564 	mpp_debug_leave();
1565 
1566 	return 0;
1567 }
1568 
1569 static int rkvdec2_ccu_link_session_detach(struct mpp_dev *mpp,
1570 					   struct mpp_taskqueue *queue)
1571 {
1572 	mutex_lock(&queue->session_lock);
1573 	while (queue->detach_count) {
1574 		struct mpp_session *session = NULL;
1575 
1576 		session = list_first_entry_or_null(&queue->session_detach,
1577 						   struct mpp_session,
1578 						   session_link);
1579 		if (session) {
1580 			list_del_init(&session->session_link);
1581 			queue->detach_count--;
1582 		}
1583 
1584 		mutex_unlock(&queue->session_lock);
1585 
1586 		if (session) {
1587 			mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev),
1588 					queue->detach_count);
1589 			mpp_session_deinit(session);
1590 		}
1591 
1592 		mutex_lock(&queue->session_lock);
1593 	}
1594 	mutex_unlock(&queue->session_lock);
1595 
1596 	return 0;
1597 }
1598 
1599 static int rkvdec2_ccu_power_on(struct mpp_taskqueue *queue,
1600 					  struct rkvdec2_ccu *ccu)
1601 {
1602 	if (!atomic_xchg(&ccu->power_enabled, 1)) {
1603 		u32 i;
1604 		struct mpp_dev *mpp;
1605 
1606 		/* ccu pd and clk on */
1607 		pm_runtime_get_sync(ccu->dev);
1608 		pm_stay_awake(ccu->dev);
1609 		mpp_clk_safe_enable(ccu->aclk_info.clk);
1610 		/* core pd and clk on */
1611 		for (i = 0; i < queue->core_count; i++) {
1612 			mpp = queue->cores[i];
1613 			pm_runtime_get_sync(mpp->dev);
1614 			pm_stay_awake(mpp->dev);
1615 			if (mpp->hw_ops->clk_on)
1616 				mpp->hw_ops->clk_on(mpp);
1617 		}
1618 		mpp_debug(DEBUG_CCU, "power on\n");
1619 	}
1620 
1621 	return 0;
1622 }
1623 
1624 static int rkvdec2_ccu_power_off(struct mpp_taskqueue *queue,
1625 					   struct rkvdec2_ccu *ccu)
1626 {
1627 	if (atomic_xchg(&ccu->power_enabled, 0)) {
1628 		u32 i;
1629 		struct mpp_dev *mpp;
1630 
1631 		/* ccu pd and clk off */
1632 		mpp_clk_safe_disable(ccu->aclk_info.clk);
1633 		pm_relax(ccu->dev);
1634 		pm_runtime_mark_last_busy(ccu->dev);
1635 		pm_runtime_put_autosuspend(ccu->dev);
1636 		/* core pd and clk off */
1637 		for (i = 0; i < queue->core_count; i++) {
1638 			mpp = queue->cores[i];
1639 
1640 			if (mpp->hw_ops->clk_off)
1641 				mpp->hw_ops->clk_off(mpp);
1642 			pm_relax(mpp->dev);
1643 			pm_runtime_mark_last_busy(mpp->dev);
1644 			pm_runtime_put_autosuspend(mpp->dev);
1645 		}
1646 		mpp_debug(DEBUG_CCU, "power off\n");
1647 	}
1648 
1649 	return 0;
1650 }
1651 
1652 static int rkvdec2_soft_ccu_dequeue(struct mpp_taskqueue *queue)
1653 {
1654 	struct mpp_task *mpp_task = NULL, *n;
1655 
1656 	mpp_debug_enter();
1657 
1658 	list_for_each_entry_safe(mpp_task, n,
1659 				 &queue->running_list,
1660 				 queue_link) {
1661 		struct mpp_dev *mpp = mpp_task->mpp ? mpp_task->mpp : mpp_task->session->mpp;
1662 		u32 irq_status = mpp->irq_status;
1663 		u32 timeout_flag = test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
1664 		u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
1665 
1666 		if (irq_status || timeout_flag || abort_flag) {
1667 			struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1668 
1669 			set_bit(TASK_STATE_HANDLE, &mpp_task->state);
1670 			cancel_delayed_work(&mpp_task->timeout_work);
1671 			mpp_time_diff(mpp_task);
1672 			task->irq_status = irq_status;
1673 			mpp_debug(DEBUG_IRQ_CHECK, "irq_status=%08x, timeout=%u, abort=%u\n",
1674 				  irq_status, timeout_flag, abort_flag);
1675 			if (mpp->dev_ops->finish)
1676 				mpp->dev_ops->finish(mpp, mpp_task);
1677 			set_bit(TASK_STATE_FINISH, &mpp_task->state);
1678 			set_bit(TASK_STATE_DONE, &mpp_task->state);
1679 
1680 			set_bit(mpp->core_id, &queue->core_idle);
1681 			mpp_dbg_core("set core %d idle %lx\n", mpp->core_id, queue->core_idle);
1682 			/* Wake up the GET thread */
1683 			wake_up(&mpp_task->wait);
1684 			/* free task */
1685 			list_del_init(&mpp_task->queue_link);
1686 			kref_put(&mpp_task->ref, mpp_free_task);
1687 		} else {
1688 			/* NOTE: stop at the first unfinished task */
1689 			break;
1690 		}
1691 	}
1692 
1693 	mpp_debug_leave();
1694 	return 0;
1695 }
1696 
1697 static int rkvdec2_soft_ccu_reset(struct mpp_taskqueue *queue)
1698 {
1699 	u32 i = 0;
1700 
1701 	for (i = 0; i < queue->core_count; i++) {
1702 		struct mpp_dev *mpp = queue->cores[i];
1703 		struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1704 
1705 		if (dec->disable_work)
1706 			continue;
1707 		if (!atomic_read(&mpp->reset_request))
1708 			continue;
1709 		dev_info(mpp->dev, "resetting...\n");
1710 		disable_hardirq(mpp->irq);
1711 
1712 		/* force idle, disconnect core and ccu */
1713 		writel(dec->core_mask, dec->ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
1714 		rkvdec2_reset(mpp);
1715 
1716 		/* clear error mask */
1717 		writel_relaxed(dec->core_mask & RKVDEC_CCU_CORE_RW_MASK,
1718 			       dec->ccu->reg_base + RKVDEC_CCU_CORE_ERR_BASE);
1719 		mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
1720 		atomic_set(&mpp->reset_request, 0);
1721 
1722 		/* connect core and ccu */
1723 		writel(dec->core_mask & RKVDEC_CCU_CORE_RW_MASK,
1724 		       dec->ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
1725 
1726 		enable_irq(mpp->irq);
1727 		dev_info(mpp->dev, "reset done\n");
1728 	}
1729 	atomic_set(&queue->reset_request, 0);
1730 
1731 	return 0;
1732 }
1733 
1734 void *rkvdec2_ccu_alloc_task(struct mpp_session *session,
1735 				  struct mpp_task_msgs *msgs)
1736 {
1737 	int ret;
1738 	struct rkvdec2_task *task;
1739 
1740 	task = kzalloc(sizeof(*task), GFP_KERNEL);
1741 	if (!task)
1742 		return NULL;
1743 
1744 	ret = rkvdec2_task_init(session->mpp, session, task, msgs);
1745 	if (ret) {
1746 		kfree(task);
1747 		return NULL;
1748 	}
1749 
1750 	return &task->mpp_task;
1751 }
1752 
1753 irqreturn_t rkvdec2_soft_ccu_irq(int irq, void *param)
1754 {
1755 	struct mpp_dev *mpp = param;
1756 	u32 irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
1757 
1758 	if (irq_status & RKVDEC_IRQ_RAW) {
1759 		mpp_debug(DEBUG_IRQ_STATUS, "irq_status=%08x\n", irq_status);
1760 		if (irq_status & RKVDEC_INT_ERROR_MASK) {
1761 			atomic_inc(&mpp->reset_request);
1762 			atomic_inc(&mpp->queue->reset_request);
1763 		}
1764 		mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
1765 		mpp->irq_status = irq_status;
1766 		kthread_queue_work(&mpp->queue->worker, &mpp->work);
1767 		return IRQ_HANDLED;
1768 	}
1769 	return IRQ_NONE;
1770 }
1771 
1772 static inline int rkvdec2_set_core_info(u32 *reg, int idx)
1773 {
1774 	u32 val = (idx << 16) & RKVDEC_REG_FILM_IDX_MASK;
1775 
1776 	reg[RKVDEC_REG_CORE_CTRL_INDEX] &= ~RKVDEC_REG_FILM_IDX_MASK;
1777 
1778 	reg[RKVDEC_REG_CORE_CTRL_INDEX] |= val;
1779 
1780 	return 0;
1781 }
1782 
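/*
 * Program one task directly on a core in soft-CCU mode: the link block is
 * switched to core/CCU work mode, the CCU is told which core mask is
 * active, caches are sized and cleared, the task registers are written
 * and the start bit is set after a write barrier. Scheduling between the
 * cores is done in software (see the worker below); the CCU block appears
 * to be used here only for core status and idle tracking.
 */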
1783 static int rkvdec2_soft_ccu_enqueue(struct mpp_dev *mpp, struct mpp_task *mpp_task)
1784 {
1785 	u32 i, reg_en, reg;
1786 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1787 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1788 
1789 	mpp_debug_enter();
1790 
1791 	/* set reg for link */
1792 	reg = RKVDEC_LINK_BIT_CORE_WORK_MODE | RKVDEC_LINK_BIT_CCU_WORK_MODE;
1793 	writel_relaxed(reg, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
1794 
1795 	/* set reg for ccu */
1796 	writel_relaxed(RKVDEC_CCU_BIT_WORK_EN, dec->ccu->reg_base + RKVDEC_CCU_WORK_BASE);
1797 	writel_relaxed(RKVDEC_CCU_BIT_WORK_MODE, dec->ccu->reg_base + RKVDEC_CCU_WORK_MODE_BASE);
1798 	writel_relaxed(dec->core_mask, dec->ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
1799 
1800 	/* set cache size */
1801 	reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS |
1802 		  RKVDEC_CACHE_PERMIT_READ_ALLOCATE;
1803 	if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
1804 		reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;
1805 
1806 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
1807 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
1808 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE2_SIZE_BASE, reg);
1809 	/* clear cache */
1810 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
1811 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
1812 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);
1813 
1814 	mpp_iommu_flush_tlb(mpp->iommu_info);
1815 	/* set registers for hardware */
1816 	reg_en = mpp_task->hw_info->reg_en;
1817 	for (i = 0; i < task->w_req_cnt; i++) {
1818 		int s, e;
1819 		struct mpp_request *req = &task->w_reqs[i];
1820 
1821 		s = req->offset / sizeof(u32);
1822 		e = s + req->size / sizeof(u32);
1823 		mpp_write_req(mpp, task->reg, s, e, reg_en);
1824 	}
1825 	/* init current task */
1826 	mpp->cur_task = mpp_task;
1827 	mpp->irq_status = 0;
1828 	writel_relaxed(dec->core_mask, dec->ccu->reg_base + RKVDEC_CCU_CORE_STA_BASE);
1829 	/* Flush the registers before starting the device */
1830 	wmb();
1831 	mpp_write(mpp, RKVDEC_REG_START_EN_BASE, task->reg[reg_en] | RKVDEC_START_EN);
1832 
1833 	mpp_debug_leave();
1834 
1835 	return 0;
1836 }
1837 
1838 static struct mpp_dev *rkvdec2_get_idle_core(struct mpp_taskqueue *queue,
1839 					     struct mpp_task *mpp_task)
1840 {
1841 	u32 i = 0;
1842 	struct rkvdec2_dev *dec = NULL;
1843 
1844 	for (i = 0; i < queue->core_count; i++) {
1845 		struct rkvdec2_dev *core = to_rkvdec2_dev(queue->cores[i]);
1846 
1847 		if (core->disable_work)
1848 			continue;
1849 
1850 		if (test_bit(i, &queue->core_idle)) {
1851 			if (!dec) {
1852 				dec = core;
1853 				continue;
1854 			}
1855 			/* prefer the core with less pending work */
1856 			if (core->task_index < dec->task_index)
1857 				dec = core;
1858 		}
1859 	}
1860 	/* if get core */
1861 	if (dec) {
1862 		mpp_task->mpp = &dec->mpp;
1863 		mpp_task->core_id = dec->mpp.core_id;
1864 		clear_bit(mpp_task->core_id, &queue->core_idle);
1865 		dec->task_index++;
1866 		mpp_dbg_core("clear core %d idle\n", mpp_task->core_id);
1867 		return mpp_task->mpp;
1868 	}
1869 
1870 	return NULL;
1871 }
1872 
1873 static bool rkvdec2_core_working(struct mpp_taskqueue *queue)
1874 {
1875 	u32 i = 0;
1876 	struct rkvdec2_dev *core;
1877 	bool flag = false;
1878 
1879 	for (i = 0; i < queue->core_count; i++) {
1880 		core = to_rkvdec2_dev(queue->cores[i]);
1881 		if (core->disable_work)
1882 			continue;
1883 		if (!test_bit(i, &queue->core_idle)) {
1884 			flag = true;
1885 			break;
1886 		}
1887 	}
1888 
1889 	return flag;
1890 }
1891 
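/*
 * Soft-CCU scheduling loop: retire whatever finished on any core, run the
 * requested resets once all cores are idle, then pick the idle core with
 * the least work for the next pending task, stamp the session index into
 * the task registers, set the rcb buffer and enqueue it with a
 * WORK_TIMEOUT_MS timeout. Power is dropped when the running list is
 * empty, and detached sessions are cleaned up at the end of each pass.
 */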
1892 void rkvdec2_soft_ccu_worker(struct kthread_work *work_s)
1893 {
1894 	struct mpp_task *mpp_task;
1895 	struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
1896 	struct mpp_taskqueue *queue = mpp->queue;
1897 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1898 
1899 	mpp_debug_enter();
1900 
1901 	/* process all finished task in running list */
1902 	rkvdec2_soft_ccu_dequeue(queue);
1903 
1904 	/* process reset request */
1905 	if (atomic_read(&queue->reset_request)) {
1906 		if (rkvdec2_core_working(queue))
1907 			goto done;
1908 		rkvdec2_soft_ccu_reset(queue);
1909 	}
1910 
1911 get_task:
1912 	/* get one task from the pending list */
1913 	mutex_lock(&queue->pending_lock);
1914 	mpp_task = list_first_entry_or_null(&queue->pending_list,
1915 					    struct mpp_task, queue_link);
1916 	mutex_unlock(&queue->pending_lock);
1917 	if (!mpp_task)
1918 		goto done;
1919 
1920 	if (test_bit(TASK_STATE_ABORT, &mpp_task->state)) {
1921 		mutex_lock(&queue->pending_lock);
1922 		list_del_init(&mpp_task->queue_link);
1923 		mutex_unlock(&queue->pending_lock);
1924 		goto get_task;
1925 	}
1926 	/* find one core is idle */
1927 	mpp = rkvdec2_get_idle_core(queue, mpp_task);
1928 	if (!mpp)
1929 		goto done;
1930 
1931 	/* set session index */
1932 	rkvdec2_set_core_info(mpp_task->reg, mpp_task->session->index);
1933 	/* set rcb buffer */
1934 	mpp_set_rcbbuf(mpp, mpp_task->session, mpp_task);
1935 
1936 	/* pending to running */
1937 	mutex_lock(&queue->pending_lock);
1938 	list_move_tail(&mpp_task->queue_link, &queue->running_list);
1939 	mutex_unlock(&queue->pending_lock);
1940 	set_bit(TASK_STATE_RUNNING, &mpp_task->state);
1941 
1942 	mpp_time_record(mpp_task);
1943 	mpp_debug(DEBUG_TASK_INFO, "pid %d, start hw %s\n",
1944 		  mpp_task->session->pid, dev_name(mpp->dev));
1945 	set_bit(TASK_STATE_START, &mpp_task->state);
1946 	INIT_DELAYED_WORK(&mpp_task->timeout_work, rkvdec2_ccu_link_timeout_work);
1947 	schedule_delayed_work(&mpp_task->timeout_work, msecs_to_jiffies(WORK_TIMEOUT_MS));
1948 	rkvdec2_ccu_power_on(queue, dec->ccu);
1949 	rkvdec2_soft_ccu_enqueue(mpp, mpp_task);
1950 done:
1951 	if (list_empty(&queue->running_list))
1952 		rkvdec2_ccu_power_off(queue, dec->ccu);
1953 	/* session detach out of queue */
1954 	rkvdec2_ccu_link_session_detach(mpp, queue);
1955 
1956 	mpp_debug_leave();
1957 }
1958