// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 *
 * author:
 *	Alpha Lin, alpha.lin@rock-chips.com
 *	Randy Li, randy.li@rock-chips.com
 *	Ding Wei, leo.ding@rock-chips.com
 *
 */
#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/regmap.h>
#include <linux/kernel.h>
#include <linux/thermal.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/rockchip/rockchip_sip.h>
#include <linux/regulator/consumer.h>

#include <soc/rockchip/pm_domains.h>
#include <soc/rockchip/rockchip_sip.h>
#include <soc/rockchip/rockchip_opp_select.h>

#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_iommu.h"

#define RKVDEC_DRIVER_NAME		"mpp_rkvdec"

#define IOMMU_GET_BUS_ID(x)		(((x) >> 6) & 0x1f)
#define IOMMU_PAGE_SIZE			SZ_4K

#define RKVDEC_SESSION_MAX_BUFFERS	40
/* The maximum register count across all hardware versions */
#define HEVC_DEC_REG_NUM		68
#define HEVC_DEC_REG_HW_ID_INDEX	0
#define HEVC_DEC_REG_START_INDEX	0
#define HEVC_DEC_REG_END_INDEX		67

#define RKVDEC_V1_REG_NUM		78
#define RKVDEC_V1_REG_HW_ID_INDEX	0
#define RKVDEC_V1_REG_START_INDEX	0
#define RKVDEC_V1_REG_END_INDEX		77

#define RKVDEC_V2_REG_NUM		109
#define RKVDEC_V2_REG_HW_ID_INDEX	0
#define RKVDEC_V2_REG_START_INDEX	0
#define RKVDEC_V2_REG_END_INDEX		108

#define RKVDEC_REG_INT_EN		0x004
#define RKVDEC_REG_INT_EN_INDEX		(1)
#define RKVDEC_WR_DDR_ALIGN_EN		BIT(23)
#define RKVDEC_FORCE_SOFT_RESET_VALID	BIT(21)
#define RKVDEC_SOFTWARE_RESET_EN	BIT(20)
#define RKVDEC_INT_COLMV_REF_ERROR	BIT(17)
#define RKVDEC_INT_BUF_EMPTY		BIT(16)
#define RKVDEC_INT_TIMEOUT		BIT(15)
#define RKVDEC_INT_STRM_ERROR		BIT(14)
#define RKVDEC_INT_BUS_ERROR		BIT(13)
#define RKVDEC_DEC_INT_RAW		BIT(9)
#define RKVDEC_DEC_INT			BIT(8)
#define RKVDEC_DEC_TIMEOUT_EN		BIT(5)
#define RKVDEC_DEC_IRQ_DIS		BIT(4)
#define RKVDEC_CLOCK_GATE_EN		BIT(1)
#define RKVDEC_DEC_START		BIT(0)

#define RKVDEC_REG_SYS_CTRL		0x008
#define RKVDEC_REG_SYS_CTRL_INDEX	(2)
#define RKVDEC_RGE_WIDTH_INDEX		(3)
#define RKVDEC_GET_FORMAT(x)		(((x) >> 20) & 0x3)
#define REVDEC_GET_PROD_NUM(x)		(((x) >> 16) & 0xffff)
#define RKVDEC_GET_WIDTH(x)		(((x) & 0x3ff) << 4)
#define RKVDEC_FMT_H265D		(0)
#define RKVDEC_FMT_H264D		(1)
#define RKVDEC_FMT_VP9D			(2)

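/*
 * Worked example for the bitfield helpers above (illustrative register
 * value): for sys_ctrl = 0x00100050, RKVDEC_GET_FORMAT() yields
 * (0x00100050 >> 20) & 0x3 = 1 (RKVDEC_FMT_H264D) and RKVDEC_GET_WIDTH()
 * yields (0x00100050 & 0x3ff) << 4 = 0x50 << 4 = 1280, i.e. the width is
 * stored in units of 16 pixels.
 */
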
#define RKVDEC_REG_RLC_BASE		0x010
#define RKVDEC_REG_RLC_BASE_INDEX	(4)

#define RKVDEC_RGE_YSTRDE_INDEX		(8)
#define RKVDEC_GET_YSTRDE(x)		(((x) & 0x1fffff) << 4)
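/*
 * Illustrative decode of the stride field: a register value of 0x78 gives
 * RKVDEC_GET_YSTRDE(0x78) = (0x78 & 0x1fffff) << 4 = 120 * 16 = 1920,
 * i.e. the y stride is stored in units of 16 bytes.
 */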

#define RKVDEC_REG_PPS_BASE		0x0a0
#define RKVDEC_REG_PPS_BASE_INDEX	(42)

#define RKVDEC_REG_VP9_REFCOLMV_BASE		0x0d0
#define RKVDEC_REG_VP9_REFCOLMV_BASE_INDEX	(52)

#define RKVDEC_REG_CACHE0_SIZE_BASE	0x41c
#define RKVDEC_REG_CACHE1_SIZE_BASE	0x45c
#define RKVDEC_REG_CLR_CACHE0_BASE	0x410
#define RKVDEC_REG_CLR_CACHE1_BASE	0x450

#define RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS	BIT(0)
#define RKVDEC_CACHE_PERMIT_READ_ALLOCATE	BIT(1)
#define RKVDEC_CACHE_LINE_SIZE_64_BYTES		BIT(4)

#define RKVDEC_POWER_CTL_INDEX		(99)
#define RKVDEC_POWER_CTL_BASE		0x018c

#define FALLBACK_STATIC_TEMPERATURE	55000

#define to_rkvdec_task(task)		\
		container_of(task, struct rkvdec_task, mpp_task)
#define to_rkvdec_dev(dev)		\
		container_of(dev, struct rkvdec_dev, mpp)

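/*
 * Minimal sketch (hypothetical helper, not part of the driver) of the
 * 10bit fd + 22bit offset packing used by the address registers above and
 * described before rkvdec_process_reg_fd() below.
 */
static inline u32 rkvdec_example_pack_fd_offset(int fd, u32 offset)
{
	/* low 10 bits: dma-buf fd (0..1023), high 22 bits: byte offset */
	return ((offset & 0x3fffff) << 10) | ((u32)fd & 0x3ff);
}
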
enum RKVDEC_MODE {
	RKVDEC_MODE_NONE,
	RKVDEC_MODE_ONEFRAME,
	RKVDEC_MODE_BUTT
};

enum SET_CLK_EVENT {
	EVENT_POWER_ON = 0,
	EVENT_POWER_OFF,
	EVENT_ADJUST,
	EVENT_THERMAL,
	EVENT_BUTT,
};

struct rkvdec_task {
	struct mpp_task mpp_task;

	enum RKVDEC_MODE link_mode;
	enum MPP_CLOCK_MODE clk_mode;
	u32 reg[RKVDEC_V2_REG_NUM];
	struct reg_offset_info off_inf;

	u32 strm_addr;
	u32 irq_status;
	/* reqs for the current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
	/* ystride info */
	u32 pixels;
};

struct rkvdec_dev {
	struct mpp_dev mpp;
	/* sip smc reset lock */
	struct mutex sip_reset_lock;

	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
	struct mpp_clk_info core_clk_info;
	struct mpp_clk_info cabac_clk_info;
	struct mpp_clk_info hevc_cabac_clk_info;
	u32 default_max_load;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	struct reset_control *rst_a;
	struct reset_control *rst_h;
	struct reset_control *rst_niu_a;
	struct reset_control *rst_niu_h;
	struct reset_control *rst_core;
	struct reset_control *rst_cabac;
	struct reset_control *rst_hevc_cabac;

	unsigned long aux_iova;
	struct page *aux_page;
#ifdef CONFIG_PM_DEVFREQ
	struct regulator *vdd;
	struct devfreq *devfreq;
	struct devfreq *parent_devfreq;
	struct notifier_block devfreq_nb;
	struct thermal_cooling_device *devfreq_cooling;
	struct thermal_zone_device *thermal_zone;
	u32 static_power_coeff;
	s32 ts[4];
	/* set clk lock */
	struct mutex set_clk_lock;
	unsigned int thermal_div;
	unsigned long volt;
	unsigned long devf_aclk_rate_hz;
	unsigned long devf_core_rate_hz;
	unsigned long devf_cabac_rate_hz;
#endif
	/* record last infos */
	u32 last_fmt;
	bool had_reset;
	bool grf_changed;
};

/*
 * hardware information
 */
static struct mpp_hw_info rk_hevcdec_hw_info = {
	.reg_num = HEVC_DEC_REG_NUM,
	.reg_id = HEVC_DEC_REG_HW_ID_INDEX,
	.reg_start = HEVC_DEC_REG_START_INDEX,
	.reg_end = HEVC_DEC_REG_END_INDEX,
	.reg_en = RKVDEC_REG_INT_EN_INDEX,
};

static struct mpp_hw_info rkvdec_v1_hw_info = {
	.reg_num = RKVDEC_V1_REG_NUM,
	.reg_id = RKVDEC_V1_REG_HW_ID_INDEX,
	.reg_start = RKVDEC_V1_REG_START_INDEX,
	.reg_end = RKVDEC_V1_REG_END_INDEX,
	.reg_en = RKVDEC_REG_INT_EN_INDEX,
};

/*
 * file handle translation information
 */
static const u16 trans_tbl_h264d[] = {
	4, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
	23, 24, 41, 42, 43, 48, 75
};

static const u16 trans_tbl_h265d[] = {
	4, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
	23, 24, 42, 43
};

static const u16 trans_tbl_vp9d[] = {
	4, 6, 7, 11, 12, 13, 14, 15, 16
};

static struct mpp_trans_info rk_hevcdec_trans[] = {
	[RKVDEC_FMT_H265D] = {
		.count = ARRAY_SIZE(trans_tbl_h265d),
		.table = trans_tbl_h265d,
	},
};

static struct mpp_trans_info rkvdec_v1_trans[] = {
	[RKVDEC_FMT_H265D] = {
		.count = ARRAY_SIZE(trans_tbl_h265d),
		.table = trans_tbl_h265d,
	},
	[RKVDEC_FMT_H264D] = {
		.count = ARRAY_SIZE(trans_tbl_h264d),
		.table = trans_tbl_h264d,
	},
	[RKVDEC_FMT_VP9D] = {
		.count = ARRAY_SIZE(trans_tbl_vp9d),
		.table = trans_tbl_vp9d,
	},
};

#ifdef CONFIG_PM_DEVFREQ
static int rkvdec_devf_set_clk(struct rkvdec_dev *dec,
			       unsigned long aclk_rate_hz,
			       unsigned long core_rate_hz,
			       unsigned long cabac_rate_hz,
			       unsigned int event)
{
	struct clk *aclk = dec->aclk_info.clk;
	struct clk *clk_core = dec->core_clk_info.clk;
	struct clk *clk_cabac = dec->cabac_clk_info.clk;

	mutex_lock(&dec->set_clk_lock);

	switch (event) {
	case EVENT_POWER_ON:
		clk_set_rate(aclk, dec->devf_aclk_rate_hz);
		clk_set_rate(clk_core, dec->devf_core_rate_hz);
		clk_set_rate(clk_cabac, dec->devf_cabac_rate_hz);
		dec->thermal_div = 0;
		break;
	case EVENT_POWER_OFF:
		clk_set_rate(aclk, aclk_rate_hz);
		clk_set_rate(clk_core, core_rate_hz);
		clk_set_rate(clk_cabac, cabac_rate_hz);
		dec->thermal_div = 0;
		break;
	case EVENT_ADJUST:
		if (!dec->thermal_div) {
			clk_set_rate(aclk, aclk_rate_hz);
			clk_set_rate(clk_core, core_rate_hz);
			clk_set_rate(clk_cabac, cabac_rate_hz);
		} else {
			clk_set_rate(aclk,
				     aclk_rate_hz / dec->thermal_div);
			clk_set_rate(clk_core,
				     core_rate_hz / dec->thermal_div);
			clk_set_rate(clk_cabac,
				     cabac_rate_hz / dec->thermal_div);
		}
		dec->devf_aclk_rate_hz = aclk_rate_hz;
		dec->devf_core_rate_hz = core_rate_hz;
		dec->devf_cabac_rate_hz = cabac_rate_hz;
		break;
	case EVENT_THERMAL:
		dec->thermal_div = dec->devf_aclk_rate_hz / aclk_rate_hz;
		if (dec->thermal_div > 4)
			dec->thermal_div = 4;
		if (dec->thermal_div) {
			clk_set_rate(aclk,
				     dec->devf_aclk_rate_hz / dec->thermal_div);
			clk_set_rate(clk_core,
				     dec->devf_core_rate_hz / dec->thermal_div);
			clk_set_rate(clk_cabac,
				     dec->devf_cabac_rate_hz / dec->thermal_div);
		}
		break;
	}

	mutex_unlock(&dec->set_clk_lock);

	return 0;
}
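
/*
 * Example of the EVENT_THERMAL path above (illustrative rates): with
 * dec->devf_aclk_rate_hz = 400 MHz and a requested aclk_rate_hz of
 * 100 MHz, thermal_div becomes 4 (also the cap), so aclk, core and cabac
 * all run at a quarter of their devfreq rates until a later
 * EVENT_POWER_* or EVENT_ADJUST event restores or replaces them.
 */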

static int devfreq_target(struct device *dev,
			  unsigned long *freq, u32 flags)
{
	int ret = 0;
	unsigned int clk_event;
	struct dev_pm_opp *opp;
	unsigned long target_volt, target_freq;
	unsigned long aclk_rate_hz, core_rate_hz, cabac_rate_hz;

	struct rkvdec_dev *dec = dev_get_drvdata(dev);
	struct devfreq *devfreq = dec->devfreq;
	struct devfreq_dev_status *stat = &devfreq->last_status;
	unsigned long old_clk_rate = stat->current_frequency;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp)) {
		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
		return PTR_ERR(opp);
	}
	target_freq = dev_pm_opp_get_freq(opp);
	target_volt = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);

	if (target_freq < *freq) {
		clk_event = EVENT_THERMAL;
		aclk_rate_hz = target_freq;
		core_rate_hz = target_freq;
		cabac_rate_hz = target_freq;
	} else {
		clk_event = stat->busy_time ? EVENT_POWER_ON : EVENT_POWER_OFF;
		aclk_rate_hz = dec->devf_aclk_rate_hz;
		core_rate_hz = dec->devf_core_rate_hz;
		cabac_rate_hz = dec->devf_cabac_rate_hz;
	}

	if (old_clk_rate == target_freq) {
		if (dec->volt == target_volt)
			return ret;
		ret = regulator_set_voltage(dec->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "Cannot set voltage %lu uV\n",
				target_volt);
			return ret;
		}
		dec->volt = target_volt;
		return 0;
	}

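	/*
	 * Classic DVFS ordering: raise the voltage before increasing the
	 * clock, and lower it only after the clock has dropped, so the
	 * rail always meets the requirement of the faster of the two
	 * operating points.
	 */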
	if (old_clk_rate < target_freq) {
		ret = regulator_set_voltage(dec->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "failed to set voltage %lu uV\n", target_volt);
			return ret;
		}
	}

	dev_dbg(dev, "%lu-->%lu\n", old_clk_rate, target_freq);
	rkvdec_devf_set_clk(dec, aclk_rate_hz, core_rate_hz, cabac_rate_hz, clk_event);
	stat->current_frequency = target_freq;

	if (old_clk_rate > target_freq) {
		ret = regulator_set_voltage(dec->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "failed to set voltage %lu uV\n", target_volt);
			return ret;
		}
	}
	dec->volt = target_volt;

	return ret;
}

static int devfreq_get_cur_freq(struct device *dev,
				unsigned long *freq)
{
	struct rkvdec_dev *dec = dev_get_drvdata(dev);

	*freq = clk_get_rate(dec->aclk_info.clk);

	return 0;
}

static int devfreq_get_dev_status(struct device *dev,
				  struct devfreq_dev_status *stat)
{
	struct rkvdec_dev *dec = dev_get_drvdata(dev);
	struct devfreq *devfreq = dec->devfreq;

	memcpy(stat, &devfreq->last_status, sizeof(*stat));

	return 0;
}

static struct devfreq_dev_profile devfreq_profile = {
	.target	= devfreq_target,
	.get_cur_freq = devfreq_get_cur_freq,
	.get_dev_status	= devfreq_get_dev_status,
};

static unsigned long
model_static_power(struct devfreq *devfreq,
		   unsigned long voltage)
{
	struct device *dev = devfreq->dev.parent;
	struct rkvdec_dev *dec = dev_get_drvdata(dev);
	struct thermal_zone_device *tz = dec->thermal_zone;

	int temperature;
	unsigned long temp;
	unsigned long temp_squared, temp_cubed, temp_scaling_factor;
	const unsigned long voltage_cubed = (voltage * voltage * voltage) >> 10;

	if (!IS_ERR_OR_NULL(tz) && tz->ops->get_temp) {
		int ret;

		ret = tz->ops->get_temp(tz, &temperature);
		if (ret) {
			dev_warn_ratelimited(dev, "failed to read thermal zone temperature\n");
			temperature = FALLBACK_STATIC_TEMPERATURE;
		}
	} else {
		temperature = FALLBACK_STATIC_TEMPERATURE;
	}

	/*
	 * Calculate the temperature scaling factor to be applied to the
	 * voltage scaled power.
	 */
	temp = temperature / 1000;
	temp_squared = temp * temp;
	temp_cubed = temp_squared * temp;
	temp_scaling_factor = (dec->ts[3] * temp_cubed)
	    + (dec->ts[2] * temp_squared) + (dec->ts[1] * temp) + dec->ts[0];
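	/*
	 * Putting it together (units assumed: voltage in mV, temperature in
	 * degrees C): static power ~ static_power_coeff * V^3 * poly(T),
	 * with the >> 10 / >> 20 shifts keeping the fixed-point
	 * intermediates in range and the final / 1000000 scaling the result
	 * to the expected magnitude.
	 */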

	return (((dec->static_power_coeff * voltage_cubed) >> 20)
		* temp_scaling_factor) / 1000000;
}

static struct devfreq_cooling_power cooling_power_data = {
	.get_static_power = model_static_power,
	.dyn_power_coeff = 120,
};

static int power_model_simple_init(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
	struct device_node *np = mpp->dev->of_node;

	u32 temp;
	const char *tz_name;
	struct device_node *power_model_node;

	power_model_node = of_get_child_by_name(np, "vcodec_power_model");
	if (!power_model_node) {
		dev_err(mpp->dev, "could not find power_model node\n");
		return -ENODEV;
	}

	if (of_property_read_string(power_model_node,
				    "thermal-zone",
				    &tz_name)) {
		dev_err(mpp->dev, "thermal-zone in power_model not available\n");
		return -EINVAL;
	}

	dec->thermal_zone = thermal_zone_get_zone_by_name(tz_name);
	if (IS_ERR(dec->thermal_zone)) {
		pr_warn("Error getting thermal zone, not yet ready?\n");
		dec->thermal_zone = NULL;
		return -EPROBE_DEFER;
	}

	if (of_property_read_u32(power_model_node,
				 "static-power-coefficient",
				 &dec->static_power_coeff)) {
		dev_err(mpp->dev, "static-power-coefficient not available\n");
		return -EINVAL;
	}
	if (of_property_read_u32(power_model_node,
				 "dynamic-power-coefficient",
				 &temp)) {
		dev_err(mpp->dev, "dynamic-power-coefficient not available\n");
		return -EINVAL;
	}
	cooling_power_data.dyn_power_coeff = (unsigned long)temp;

	if (of_property_read_u32_array(power_model_node,
				       "ts",
				       (u32 *)dec->ts,
				       4)) {
		dev_err(mpp->dev, "ts in power_model not available\n");
		return -EINVAL;
	}

	return 0;
}

static int devfreq_notifier_call(struct notifier_block *nb,
				 unsigned long event,
				 void *data)
{
	struct rkvdec_dev *dec = container_of(nb,
					      struct rkvdec_dev,
					      devfreq_nb);

	if (!dec)
		return NOTIFY_OK;

	if (event == DEVFREQ_PRECHANGE)
		mutex_lock(&dec->sip_reset_lock);
	else if (event == DEVFREQ_POSTCHANGE)
		mutex_unlock(&dec->sip_reset_lock);

	return NOTIFY_OK;
}
#endif

/*
 * NOTE: rkvdec/rkhevc put the scaling list address in the pps buffer;
 * hardware reads it by the pps id found in the video stream data.
 *
 * So we need to translate the address in the iommu case. The address data
 * also uses the 10bit fd + 22bit offset encoding.
 * Because the userspace decoder does not provide the pps id in the
 * register file sets, the kernel driver has to translate every scaling
 * list address in the pps buffer, which means 256 pps entries for H.264
 * and 64 for H.265.
 *
 * In order to optimize performance, the kernel driver asks the userspace
 * decoder to set all scaling list addresses in the pps buffer to the same
 * one that will be used by the current decoding task. Then the kernel
 * driver only has to translate the first address and copy it to all pps
 * entries.
 */
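/*
 * Illustrative layout for the H.264 case handled in
 * rkvdec_process_scl_fd() below: with pps_info_size = 32 and
 * sub_addr_offset = 23, the scaling list address of pps entry i lives at
 * byte offset + i * 32 + 23 inside the pps buffer, for i in [0, 255].
 */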
static int fill_scaling_list_pps(struct rkvdec_task *task,
				 int fd, int offset, int count,
				 int pps_info_size, int sub_addr_offset)
{
	struct dma_buf *dmabuf = NULL;
	void *vaddr = NULL;
	u8 *pps = NULL;
	u32 scaling_fd = 0;
	int ret = 0;
	u32 base = sub_addr_offset;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf)) {
		mpp_err("invalid pps buffer\n");
		return -ENOENT;
	}

	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret) {
		mpp_err("can't access the pps buffer\n");
		goto done;
	}

	vaddr = dma_buf_vmap(dmabuf);
	if (!vaddr) {
		mpp_err("can't access the pps buffer\n");
		ret = -EIO;
		goto done;
	}
	pps = vaddr + offset;
	/* NOTE: the scaling buffer referenced in pps carries no offset */
	memcpy(&scaling_fd, pps + base, sizeof(scaling_fd));
	scaling_fd = le32_to_cpu(scaling_fd);
	if (scaling_fd > 0) {
		struct mpp_mem_region *mem_region = NULL;
		u32 tmp = 0;
		int i = 0;

		mem_region = mpp_task_attach_fd(&task->mpp_task,
						scaling_fd);
		if (IS_ERR(mem_region)) {
			ret = PTR_ERR(mem_region);
			goto done;
		}

		tmp = mem_region->iova & 0xffffffff;
		tmp = cpu_to_le32(tmp);
		mpp_debug(DEBUG_PPS_FILL,
			  "pps at %p, scaling fd: %3d => %pad + offset %10d\n",
			  pps, scaling_fd, &mem_region->iova, offset);

		/* Fill the scaling list address into each pps entry */
		for (i = 0; i < count; i++, base += pps_info_size)
			memcpy(pps + base, &tmp, sizeof(tmp));
	}

done:
	dma_buf_vunmap(dmabuf, vaddr);
	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
	dma_buf_put(dmabuf);

	return ret;
}

static int rkvdec_process_scl_fd(struct mpp_session *session,
				 struct rkvdec_task *task,
				 struct mpp_task_msgs *msgs)
{
	int ret = 0;
	int pps_fd;
	u32 pps_offset;
	int idx = RKVDEC_REG_PPS_BASE_INDEX;
	u32 fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);

	if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
		pps_fd = task->reg[idx];
		pps_offset = 0;
	} else {
		pps_fd = task->reg[idx] & 0x3ff;
		pps_offset = task->reg[idx] >> 10;
	}

	pps_offset += mpp_query_reg_offset_info(&task->off_inf, idx);
	if (pps_fd > 0) {
		int pps_info_offset;
		int pps_info_count;
		int pps_info_size;
		int scaling_list_addr_offset;

		switch (fmt) {
		case RKVDEC_FMT_H264D:
			pps_info_offset = pps_offset;
			pps_info_count = 256;
			pps_info_size = 32;
			scaling_list_addr_offset = 23;
			break;
		case RKVDEC_FMT_H265D:
			pps_info_offset = pps_offset;
			pps_info_count = 64;
			pps_info_size = 80;
			scaling_list_addr_offset = 74;
			break;
		default:
			pps_info_offset = 0;
			pps_info_count = 0;
			pps_info_size = 0;
			scaling_list_addr_offset = 0;
			break;
		}

		mpp_debug(DEBUG_PPS_FILL,
			  "scaling list filling parameter:\n");
		mpp_debug(DEBUG_PPS_FILL,
			  "pps_info_offset %d\n", pps_info_offset);
		mpp_debug(DEBUG_PPS_FILL,
			  "pps_info_count  %d\n", pps_info_count);
		mpp_debug(DEBUG_PPS_FILL,
			  "pps_info_size   %d\n", pps_info_size);
		mpp_debug(DEBUG_PPS_FILL,
			  "scaling_list_addr_offset %d\n",
			  scaling_list_addr_offset);

		if (pps_info_count) {
			ret = fill_scaling_list_pps(task, pps_fd,
						    pps_info_offset,
						    pps_info_count,
						    pps_info_size,
						    scaling_list_addr_offset);
			if (ret) {
				mpp_err("fill pps failed\n");
				goto fail;
			}
		}
	}

fail:
	return ret;
}

static int rkvdec_process_reg_fd(struct mpp_session *session,
				 struct rkvdec_task *task,
				 struct mpp_task_msgs *msgs)
{
	int ret = 0;
	u32 fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);

	/*
	 * special offset scale case
	 *
	 * This translation is for fd + offset translation.
	 * One register has 32 bits. We need to transfer both the buffer file
	 * handle and the start address offset, so we pack the file handle
	 * and the offset together using the format below:
	 *
	 *  0~9  bit for the buffer file handle, range 0 ~ 1023
	 * 10~31 bit for the offset, range 0 ~ 4M
	 *
	 * But in the 4K case the offset can be larger than 4M, so for the
	 * VP9 4K decoder colmv base we scale the offset by 16.
	 */
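	/*
	 * Illustrative decode (hypothetical register value): reg = 0x00002805
	 * unpacks to fd = 0x00002805 & 0x3ff = 5 and, in the VP9 colmv case,
	 * offset = (0x00002805 >> 10) << 4 = 10 << 4 = 160 bytes.
	 */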
	if (fmt == RKVDEC_FMT_VP9D) {
		int fd;
		u32 offset;
		dma_addr_t iova = 0;
		struct mpp_mem_region *mem_region = NULL;
		int idx = RKVDEC_REG_VP9_REFCOLMV_BASE_INDEX;

		if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
			fd = task->reg[idx];
			offset = 0;
		} else {
			fd = task->reg[idx] & 0x3ff;
			offset = task->reg[idx] >> 10 << 4;
		}
		mem_region = mpp_task_attach_fd(&task->mpp_task, fd);
		if (IS_ERR(mem_region))
			return -EFAULT;

		iova = mem_region->iova;
		task->reg[idx] = iova + offset;
	}

	ret = mpp_translate_reg_address(session, &task->mpp_task,
					fmt, task->reg, &task->off_inf);
	if (ret)
		return ret;

	mpp_translate_reg_offset_info(&task->mpp_task,
				      &task->off_inf, task->reg);
	return 0;
}

static int rkvdec_extract_task_msg(struct rkvdec_task *task,
				   struct mpp_task_msgs *msgs)
{
	u32 i;
	int ret;
	struct mpp_request *req;
	struct mpp_hw_info *hw_info = task->mpp_task.hw_info;

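	/*
	 * req->offset and req->size are byte quantities while registers are
	 * u32 words, hence the sizeof(u32) conversions when checking the
	 * register window below.
	 */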
	for (i = 0; i < msgs->req_cnt; i++) {
		u32 off_s, off_e;

		req = &msgs->reqs[i];
		if (!req->size)
			continue;

		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			if (copy_from_user((u8 *)task->reg + req->offset,
					   req->data, req->size)) {
				mpp_err("copy_from_user reg failed\n");
				return -EIO;
			}
			memcpy(&task->w_reqs[task->w_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_READ: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			memcpy(&task->r_reqs[task->r_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
		  task->w_req_cnt, task->r_req_cnt);

	return 0;
}

static void *rkvdec_alloc_task(struct mpp_session *session,
			       struct mpp_task_msgs *msgs)
{
	int ret;
	struct mpp_task *mpp_task = NULL;
	struct rkvdec_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	mpp_task->hw_info = mpp->var->hw_info;
	mpp_task->reg = task->reg;
	/* extract reqs for current task */
	ret = rkvdec_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	/* process fd in pps for H.264 and H.265 */
	if (!(msgs->flags & MPP_FLAGS_SCL_FD_NO_TRANS)) {
		ret = rkvdec_process_scl_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
	/* process fd in register */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = rkvdec_process_reg_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
	task->strm_addr = task->reg[RKVDEC_REG_RLC_BASE_INDEX];
	task->link_mode = RKVDEC_MODE_ONEFRAME;
	task->clk_mode = CLK_MODE_NORMAL;

	/* get resolution info */
	task->pixels = RKVDEC_GET_YSTRDE(task->reg[RKVDEC_RGE_YSTRDE_INDEX]);
	mpp_debug(DEBUG_TASK_INFO, "ystride=%d\n", task->pixels);

	mpp_debug_leave();

	return mpp_task;

fail:
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	kfree(task);
	return NULL;
}

static void *rkvdec_prepare_with_reset(struct mpp_dev *mpp,
				       struct mpp_task *mpp_task)
{
	unsigned long flags;
	struct mpp_task *out_task = NULL;
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	spin_lock_irqsave(&mpp->queue->running_lock, flags);
	out_task = list_empty(&mpp->queue->running_list) ? mpp_task : NULL;
	spin_unlock_irqrestore(&mpp->queue->running_lock, flags);

	if (out_task && !dec->had_reset) {
		struct rkvdec_task *task = to_rkvdec_task(out_task);
		u32 fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);

		/* On the rk3399, rk3228 and rk3229 chips, the hardware
		 * times out and cannot recover when decoding switches
		 * from H.264 to VP9, so reset it on that switch before
		 * the hardware runs.
		 */
		if (dec->last_fmt == RKVDEC_FMT_H264D && fmt == RKVDEC_FMT_VP9D) {
			mpp_power_on(mpp);
			mpp_dev_reset(mpp);
			mpp_power_off(mpp);
		}
	}

	return out_task;
}

static int rkvdec_run(struct mpp_dev *mpp,
		      struct mpp_task *mpp_task)
{
	int i;
	u32 reg_en;
	struct rkvdec_task *task = NULL;

	mpp_debug_enter();

	task = to_rkvdec_task(mpp_task);
	reg_en = mpp_task->hw_info->reg_en;
	switch (task->link_mode) {
	case RKVDEC_MODE_ONEFRAME: {
		u32 reg;

		/* set cache size */
		reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS
			| RKVDEC_CACHE_PERMIT_READ_ALLOCATE;
		if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
			reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;

		mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
		mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
		/* clear cache */
		mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
		mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
		/* set registers for hardware */
		for (i = 0; i < task->w_req_cnt; i++) {
			int s, e;
			struct mpp_request *req = &task->w_reqs[i];

			s = req->offset / sizeof(u32);
			e = s + req->size / sizeof(u32);
			mpp_write_req(mpp, task->reg, s, e, reg_en);
		}
		/* init current task */
		mpp->cur_task = mpp_task;
		/* Flush the registers before starting the device */
		wmb();
		mpp_write(mpp, RKVDEC_REG_INT_EN,
			  task->reg[reg_en] | RKVDEC_DEC_START);
	} break;
	default:
		break;
	}

	mpp_debug_leave();

	return 0;
}

static int rkvdec_3328_run(struct mpp_dev *mpp,
			   struct mpp_task *mpp_task)
{
	u32 fmt = 0;
	u32 cfg = 0;
	struct rkvdec_task *task = NULL;

	mpp_debug_enter();

	task = to_rkvdec_task(mpp_task);

	/*
	 * HW defect workaround: the VP9 power save optimization causes
	 * decoding corruption, so disable the optimization here.
	 */
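	/*
	 * Concretely, the masking below forces the low 16 power-control bits
	 * on and then clears bit 12, assumed here to be the VP9 power-save
	 * enable.
	 */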
	fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
	if (fmt == RKVDEC_FMT_VP9D) {
		cfg = task->reg[RKVDEC_POWER_CTL_INDEX] | 0xFFFF;
		task->reg[RKVDEC_POWER_CTL_INDEX] = cfg & (~(1 << 12));
		mpp_write_relaxed(mpp, RKVDEC_POWER_CTL_BASE,
				  task->reg[RKVDEC_POWER_CTL_INDEX]);
	}

	rkvdec_run(mpp, mpp_task);

	mpp_debug_leave();

	return 0;
}

static int rkvdec_1126_run(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
	struct rkvdec_task *task = to_rkvdec_task(mpp_task);

	if (task->link_mode == RKVDEC_MODE_ONEFRAME)
		mpp_iommu_flush_tlb(mpp->iommu_info);

	return rkvdec_run(mpp, mpp_task);
}

static int rkvdec_irq(struct mpp_dev *mpp)
{
	mpp->irq_status = mpp_read(mpp, RKVDEC_REG_INT_EN);
	if (!(mpp->irq_status & RKVDEC_DEC_INT_RAW))
		return IRQ_NONE;

	mpp_write(mpp, RKVDEC_REG_INT_EN, 0);

	return IRQ_WAKE_THREAD;
}

static int rkvdec_isr(struct mpp_dev *mpp)
{
	u32 err_mask;
	struct rkvdec_task *task = NULL;
	struct mpp_task *mpp_task = mpp->cur_task;

	mpp_debug_enter();
	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		goto done;
	}
	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_rkvdec_task(mpp_task);
	task->irq_status = mpp->irq_status;
	switch (task->link_mode) {
	case RKVDEC_MODE_ONEFRAME: {
		mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);

		err_mask = RKVDEC_INT_BUF_EMPTY
			| RKVDEC_INT_BUS_ERROR
			| RKVDEC_INT_COLMV_REF_ERROR
			| RKVDEC_INT_STRM_ERROR
			| RKVDEC_INT_TIMEOUT;

		if (err_mask & task->irq_status)
			atomic_inc(&mpp->reset_request);

		mpp_task_finish(mpp_task->session, mpp_task);
	} break;
	default:
		break;
	}
done:
	mpp_debug_leave();
	return IRQ_HANDLED;
}

static int rkvdec_3328_isr(struct mpp_dev *mpp)
{
	u32 err_mask;
	struct rkvdec_task *task = NULL;
	struct mpp_task *mpp_task = mpp->cur_task;
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	mpp_debug_enter();
	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		goto done;
	}
	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_rkvdec_task(mpp_task);
	task->irq_status = mpp->irq_status;
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);

	err_mask = RKVDEC_INT_BUF_EMPTY
		| RKVDEC_INT_BUS_ERROR
		| RKVDEC_INT_COLMV_REF_ERROR
		| RKVDEC_INT_STRM_ERROR
		| RKVDEC_INT_TIMEOUT;
	if (err_mask & task->irq_status)
		atomic_inc(&mpp->reset_request);

	/* unmap the reserved auxiliary buffer */
	if (dec->aux_iova != -1) {
		iommu_unmap(mpp->iommu_info->domain, dec->aux_iova, IOMMU_PAGE_SIZE);
		dec->aux_iova = -1;
	}

	mpp_task_finish(mpp_task->session, mpp_task);
done:
	mpp_debug_leave();
	return IRQ_HANDLED;
}

static int rkvdec_finish(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	u32 i;
	u32 dec_get;
	s32 dec_length;
	struct rkvdec_task *task = to_rkvdec_task(mpp_task);

	mpp_debug_enter();

	switch (task->link_mode) {
	case RKVDEC_MODE_ONEFRAME: {
		u32 s, e;
		struct mpp_request *req;

		/* read registers after running */
		for (i = 0; i < task->r_req_cnt; i++) {
			req = &task->r_reqs[i];
			s = req->offset / sizeof(u32);
			e = s + req->size / sizeof(u32);
			mpp_read_req(mpp, task->reg, s, e);
		}
		/* revert hack for irq status */
		task->reg[RKVDEC_REG_INT_EN_INDEX] = task->irq_status;
		/* revert hack for decoded length */
		dec_get = mpp_read_relaxed(mpp, RKVDEC_REG_RLC_BASE);
		dec_length = dec_get - task->strm_addr;
		task->reg[RKVDEC_REG_RLC_BASE_INDEX] = dec_length << 10;
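		/*
		 * The << 10 mirrors the 10bit fd + 22bit offset packing used
		 * on the way in, presumably so userspace can read the length
		 * back from the offset field with the same decoding helper.
		 */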
		mpp_debug(DEBUG_REGISTER,
			  "dec_get %08x dec_length %d\n", dec_get, dec_length);
	} break;
	default:
		break;
	}

	mpp_debug_leave();

	return 0;
}

static int rkvdec_finish_with_record_info(struct mpp_dev *mpp,
					  struct mpp_task *mpp_task)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
	struct rkvdec_task *task = to_rkvdec_task(mpp_task);

	rkvdec_finish(mpp, mpp_task);
	dec->last_fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
	dec->had_reset = atomic_read(&mpp->reset_request) > 0;

	return 0;
}

static int rkvdec_result(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task,
			 struct mpp_task_msgs *msgs)
{
	u32 i;
	struct mpp_request *req;
	struct rkvdec_task *task = to_rkvdec_task(mpp_task);

	/* FIXME may overflow the kernel */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];

		if (copy_to_user(req->data,
				 (u8 *)task->reg + req->offset,
				 req->size)) {
			mpp_err("copy_to_user reg fail\n");
			return -EIO;
		}
	}

	return 0;
}

static int rkvdec_free_task(struct mpp_session *session,
			    struct mpp_task *mpp_task)
{
	struct rkvdec_task *task = to_rkvdec_task(mpp_task);

	mpp_task_finalize(session, mpp_task);
	kfree(task);

	return 0;
}

#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
static int rkvdec_procfs_remove(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	if (dec->procfs) {
		proc_remove(dec->procfs);
		dec->procfs = NULL;
	}

	return 0;
}

static int rkvdec_procfs_init(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	dec->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
	if (IS_ERR_OR_NULL(dec->procfs)) {
		mpp_err("failed to open procfs\n");
		dec->procfs = NULL;
		return -EIO;
	}
	mpp_procfs_create_u32("aclk", 0644,
			      dec->procfs, &dec->aclk_info.debug_rate_hz);
	mpp_procfs_create_u32("clk_core", 0644,
			      dec->procfs, &dec->core_clk_info.debug_rate_hz);
	mpp_procfs_create_u32("clk_cabac", 0644,
			      dec->procfs, &dec->cabac_clk_info.debug_rate_hz);
	mpp_procfs_create_u32("clk_hevc_cabac", 0644,
			      dec->procfs, &dec->hevc_cabac_clk_info.debug_rate_hz);
	mpp_procfs_create_u32("session_buffers", 0644,
			      dec->procfs, &mpp->session_max_buffers);

	return 0;
}
#else
static inline int rkvdec_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int rkvdec_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}
#endif

static int rkvdec_init(struct mpp_dev *mpp)
{
	int ret;
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	mutex_init(&dec->sip_reset_lock);
	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_RKVDEC];

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &dec->aclk_info, "aclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get aclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &dec->hclk_info, "hclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get hclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &dec->core_clk_info, "clk_core");
	if (ret)
		mpp_err("failed on clk_get clk_core\n");
	ret = mpp_get_clk_info(mpp, &dec->cabac_clk_info, "clk_cabac");
	if (ret)
		mpp_err("failed on clk_get clk_cabac\n");
	ret = mpp_get_clk_info(mpp, &dec->hevc_cabac_clk_info, "clk_hevc_cabac");
	if (ret)
		mpp_err("failed on clk_get clk_hevc_cabac\n");
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&dec->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);
	mpp_set_clk_info_rate_hz(&dec->core_clk_info, CLK_MODE_DEFAULT, 200 * MHZ);
	mpp_set_clk_info_rate_hz(&dec->cabac_clk_info, CLK_MODE_DEFAULT, 200 * MHZ);
	mpp_set_clk_info_rate_hz(&dec->hevc_cabac_clk_info, CLK_MODE_DEFAULT, 300 * MHZ);

	/* Get normal max workload from dtsi */
	of_property_read_u32(mpp->dev->of_node,
			     "rockchip,default-max-load", &dec->default_max_load);
	/* Get reset controls from dtsi */
	dec->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
	if (!dec->rst_a)
		mpp_err("No aclk reset resource defined\n");
	dec->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
	if (!dec->rst_h)
		mpp_err("No hclk reset resource defined\n");
	dec->rst_niu_a = mpp_reset_control_get(mpp, RST_TYPE_NIU_A, "niu_a");
	if (!dec->rst_niu_a)
		mpp_err("No niu aclk reset resource defined\n");
	dec->rst_niu_h = mpp_reset_control_get(mpp, RST_TYPE_NIU_H, "niu_h");
	if (!dec->rst_niu_h)
		mpp_err("No niu hclk reset resource defined\n");
	dec->rst_core = mpp_reset_control_get(mpp, RST_TYPE_CORE, "video_core");
	if (!dec->rst_core)
		mpp_err("No core reset resource defined\n");
	dec->rst_cabac = mpp_reset_control_get(mpp, RST_TYPE_CABAC, "video_cabac");
	if (!dec->rst_cabac)
		mpp_err("No cabac reset resource defined\n");
	dec->rst_hevc_cabac = mpp_reset_control_get(mpp, RST_TYPE_HEVC_CABAC, "video_hevc_cabac");
	if (!dec->rst_hevc_cabac)
		mpp_err("No hevc cabac reset resource defined\n");

	return 0;
}

static int rkvdec_3328_iommu_hdl(struct iommu_domain *iommu,
				 struct device *iommu_dev,
				 unsigned long iova,
				 int status, void *arg)
{
	int ret = 0;
	struct mpp_dev *mpp = (struct mpp_dev *)arg;
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	/*
	 * Defect workaround: handle the invalid address generated when the
	 * rk322x hevc decoder pre-fetches colmv data in tile mode.
	 */
	if (IOMMU_GET_BUS_ID(status) == 2) {
		unsigned long page_iova = 0;
		/* avoid another page fault occurring after this one */
		if (dec->aux_iova != -1) {
			iommu_unmap(mpp->iommu_info->domain, dec->aux_iova, IOMMU_PAGE_SIZE);
			dec->aux_iova = -1;
		}

		page_iova = round_down(iova, IOMMU_PAGE_SIZE);
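		/*
		 * Map the spare page read/write at the faulting page address
		 * so the bogus pre-fetch is satisfied instead of faulting
		 * again.
		 */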
		ret = iommu_map(mpp->iommu_info->domain, page_iova,
				page_to_phys(dec->aux_page), IOMMU_PAGE_SIZE,
				IOMMU_READ | IOMMU_WRITE);
		if (!ret)
			dec->aux_iova = page_iova;
	}

	return ret;
}

#ifdef CONFIG_PM_DEVFREQ
static int rkvdec_devfreq_remove(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	devfreq_unregister_opp_notifier(mpp->dev, dec->devfreq);
	dev_pm_opp_of_remove_table(mpp->dev);

	return 0;
}

static int rkvdec_devfreq_init(struct mpp_dev *mpp)
{
	int ret = 0;
	struct devfreq_dev_status *stat;
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	mutex_init(&dec->set_clk_lock);
	dec->parent_devfreq = devfreq_get_devfreq_by_phandle(mpp->dev, "rkvdec_devfreq", 0);
	if (IS_ERR_OR_NULL(dec->parent_devfreq)) {
		if (PTR_ERR(dec->parent_devfreq) == -EPROBE_DEFER) {
			dev_warn(mpp->dev, "parent devfreq is not ready, retry\n");

			return -EPROBE_DEFER;
		}
	} else {
		dec->devfreq_nb.notifier_call = devfreq_notifier_call;
		devm_devfreq_register_notifier(mpp->dev,
					       dec->parent_devfreq,
					       &dec->devfreq_nb,
					       DEVFREQ_TRANSITION_NOTIFIER);
	}

	dec->vdd = devm_regulator_get_optional(mpp->dev, "vcodec");
	if (IS_ERR_OR_NULL(dec->vdd)) {
		if (PTR_ERR(dec->vdd) == -EPROBE_DEFER) {
			dev_warn(mpp->dev, "vcodec regulator not ready, retry\n");

			return -EPROBE_DEFER;
		}
		dev_warn(mpp->dev, "no regulator for vcodec\n");

		return 0;
	}

	ret = rockchip_init_opp_table(mpp->dev, NULL,
				      "rkvdec_leakage", "vcodec");
	if (ret) {
		dev_err(mpp->dev, "Failed to init_opp_table\n");
		goto done;
	}
	dec->devfreq = devm_devfreq_add_device(mpp->dev, &devfreq_profile,
					       "userspace", NULL);
	if (IS_ERR(dec->devfreq)) {
		ret = PTR_ERR(dec->devfreq);
		goto done;
	}

	stat = &dec->devfreq->last_status;
	stat->current_frequency = clk_get_rate(dec->aclk_info.clk);

	ret = devfreq_register_opp_notifier(mpp->dev, dec->devfreq);
	if (ret)
		goto done;

	/* simple power model init */
	ret = power_model_simple_init(mpp);
	if (!ret && dec->devfreq) {
		dec->devfreq_cooling =
			of_devfreq_cooling_register_power(mpp->dev->of_node,
							  dec->devfreq,
							  &cooling_power_data);
		if (IS_ERR_OR_NULL(dec->devfreq_cooling)) {
			ret = -ENXIO;
			dev_err(mpp->dev, "Failed to register cooling\n");
			goto done;
		}
	}

done:
	return ret;
}
#else
static inline int rkvdec_devfreq_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int rkvdec_devfreq_init(struct mpp_dev *mpp)
{
	return 0;
}
#endif

static int rkvdec_3328_init(struct mpp_dev *mpp)
{
	int ret = 0;
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	rkvdec_init(mpp);

	/* workaround for mmu page fault */
	dec->aux_page = alloc_page(GFP_KERNEL);
	if (!dec->aux_page) {
		dev_err(mpp->dev, "failed to allocate a page for auxiliary usage\n");
		ret = -ENOMEM;
		goto done;
	}
	dec->aux_iova = -1;
	mpp->iommu_info->hdl = rkvdec_3328_iommu_hdl;

	ret = rkvdec_devfreq_init(mpp);
done:
	return ret;
}

static int rkvdec_3328_exit(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	if (dec->aux_page)
		__free_page(dec->aux_page);

	if (dec->aux_iova != -1) {
		iommu_unmap(mpp->iommu_info->domain, dec->aux_iova, IOMMU_PAGE_SIZE);
		dec->aux_iova = -1;
	}
	rkvdec_devfreq_remove(mpp);

	return 0;
}

static int rkvdec_clk_on(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	mpp_clk_safe_enable(dec->aclk_info.clk);
	mpp_clk_safe_enable(dec->hclk_info.clk);
	mpp_clk_safe_enable(dec->core_clk_info.clk);
	mpp_clk_safe_enable(dec->cabac_clk_info.clk);
	mpp_clk_safe_enable(dec->hevc_cabac_clk_info.clk);

	return 0;
}

static int rkvdec_clk_off(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	clk_disable_unprepare(dec->aclk_info.clk);
	clk_disable_unprepare(dec->hclk_info.clk);
	clk_disable_unprepare(dec->core_clk_info.clk);
	clk_disable_unprepare(dec->cabac_clk_info.clk);
	clk_disable_unprepare(dec->hevc_cabac_clk_info.clk);

	return 0;
}

static int rkvdec_get_freq(struct mpp_dev *mpp,
			   struct mpp_task *mpp_task)
{
	u32 task_cnt;
	u32 workload;
	struct mpp_task *loop = NULL, *n;
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
	struct rkvdec_task *task = to_rkvdec_task(mpp_task);

	/* if the max load is not set, assume there is no advanced mode */
	if (!dec->default_max_load || !task->pixels)
		return 0;

	task_cnt = 1;
	workload = task->pixels;
	/* calc workload in pending list */
	mutex_lock(&mpp->queue->pending_lock);
	list_for_each_entry_safe(loop, n,
				 &mpp->queue->pending_list,
				 queue_link) {
		struct rkvdec_task *loop_task = to_rkvdec_task(loop);

		task_cnt++;
		workload += loop_task->pixels;
	}
	mutex_unlock(&mpp->queue->pending_lock);

	if (workload > dec->default_max_load)
		task->clk_mode = CLK_MODE_ADVANCED;

	mpp_debug(DEBUG_TASK_INFO, "pending task %d, workload %d, clk_mode=%d\n",
		  task_cnt, workload, task->clk_mode);

	return 0;
}

static int rkvdec_3328_get_freq(struct mpp_dev *mpp,
				struct mpp_task *mpp_task)
{
	u32 fmt;
	u32 ddr_align_en;
	struct rkvdec_task *task = to_rkvdec_task(mpp_task);

	fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
	ddr_align_en = task->reg[RKVDEC_REG_INT_EN_INDEX] & RKVDEC_WR_DDR_ALIGN_EN;
	if (fmt == RKVDEC_FMT_H264D && ddr_align_en)
		task->clk_mode = CLK_MODE_ADVANCED;
	else
		rkvdec_get_freq(mpp, mpp_task);

	return 0;
}

static int rkvdec_3368_set_grf(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	dec->grf_changed = mpp_grf_is_changed(mpp->grf_info);
	mpp_set_grf(mpp->grf_info);

	return 0;
}

static int rkvdec_set_freq(struct mpp_dev *mpp,
			   struct mpp_task *mpp_task)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
	struct rkvdec_task *task = to_rkvdec_task(mpp_task);

	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->core_clk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->cabac_clk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->hevc_cabac_clk_info, task->clk_mode);

	return 0;
}

static int rkvdec_3368_set_freq(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
	struct rkvdec_task *task = to_rkvdec_task(mpp_task);

	/* if the grf changed, the iommu needs a reset on rk3368 */
	if (dec->grf_changed) {
		mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
		dec->grf_changed = false;
	}

	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->core_clk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->cabac_clk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->hevc_cabac_clk_info, task->clk_mode);

	return 0;
}

static int rkvdec_3328_set_freq(struct mpp_dev *mpp,
				struct mpp_task *mpp_task)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
	struct rkvdec_task *task = to_rkvdec_task(mpp_task);

#ifdef CONFIG_PM_DEVFREQ
	if (dec->devfreq) {
		struct devfreq_dev_status *stat;
		unsigned long aclk_rate_hz, core_rate_hz, cabac_rate_hz;

		stat = &dec->devfreq->last_status;
		stat->busy_time = 1;
		stat->total_time = 1;
		aclk_rate_hz = mpp_get_clk_info_rate_hz(&dec->aclk_info,
							task->clk_mode);
		core_rate_hz = mpp_get_clk_info_rate_hz(&dec->core_clk_info,
							task->clk_mode);
		cabac_rate_hz = mpp_get_clk_info_rate_hz(&dec->cabac_clk_info,
							 task->clk_mode);
		rkvdec_devf_set_clk(dec, aclk_rate_hz,
				    core_rate_hz, cabac_rate_hz,
				    EVENT_ADJUST);
	}
#else
	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->core_clk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->cabac_clk_info, task->clk_mode);
#endif

	return 0;
}

static int rkvdec_reduce_freq(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&dec->core_clk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_REDUCE);

	return 0;
}

static int rkvdec_3328_reduce_freq(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

#ifdef CONFIG_PM_DEVFREQ
	if (dec->devfreq) {
		struct devfreq_dev_status *stat;
		unsigned long aclk_rate_hz, core_rate_hz, cabac_rate_hz;

		stat = &dec->devfreq->last_status;
		stat->busy_time = 0;
		stat->total_time = 1;
		aclk_rate_hz = mpp_get_clk_info_rate_hz(&dec->aclk_info,
							CLK_MODE_REDUCE);
		core_rate_hz = mpp_get_clk_info_rate_hz(&dec->core_clk_info,
							CLK_MODE_REDUCE);
		cabac_rate_hz = mpp_get_clk_info_rate_hz(&dec->cabac_clk_info,
							 CLK_MODE_REDUCE);
		rkvdec_devf_set_clk(dec, aclk_rate_hz,
				    core_rate_hz, cabac_rate_hz,
				    EVENT_ADJUST);
	}
#else
	mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&dec->core_clk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_REDUCE);
#endif

	return 0;
}

static int rkvdec_reset(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	mpp_debug_enter();
	if (dec->rst_a && dec->rst_h) {
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(dec->rst_niu_a);
		mpp_safe_reset(dec->rst_niu_h);
		mpp_safe_reset(dec->rst_a);
		mpp_safe_reset(dec->rst_h);
		mpp_safe_reset(dec->rst_core);
		mpp_safe_reset(dec->rst_cabac);
		mpp_safe_reset(dec->rst_hevc_cabac);
		udelay(5);
		mpp_safe_unreset(dec->rst_niu_h);
		mpp_safe_unreset(dec->rst_niu_a);
		mpp_safe_unreset(dec->rst_a);
		mpp_safe_unreset(dec->rst_h);
		mpp_safe_unreset(dec->rst_core);
		mpp_safe_unreset(dec->rst_cabac);
		mpp_safe_unreset(dec->rst_hevc_cabac);
		mpp_pmu_idle_request(mpp, false);
	}
	mpp_debug_leave();

	return 0;
}

static int rkvdec_sip_reset(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

/* The reset flow in arm trustzone firmware */
#if IS_ENABLED(CONFIG_ROCKCHIP_SIP)
	mutex_lock(&dec->sip_reset_lock);
	sip_smc_vpu_reset(0, 0, 0);
	mutex_unlock(&dec->sip_reset_lock);

	return 0;
#else
	return rkvdec_reset(mpp);
#endif
}

static struct mpp_hw_ops rkvdec_v1_hw_ops = {
	.init = rkvdec_init,
	.clk_on = rkvdec_clk_on,
	.clk_off = rkvdec_clk_off,
	.get_freq = rkvdec_get_freq,
	.set_freq = rkvdec_set_freq,
	.reduce_freq = rkvdec_reduce_freq,
	.reset = rkvdec_reset,
};

static struct mpp_hw_ops rkvdec_3399_hw_ops = {
	.init = rkvdec_init,
	.clk_on = rkvdec_clk_on,
	.clk_off = rkvdec_clk_off,
	.get_freq = rkvdec_get_freq,
	.set_freq = rkvdec_set_freq,
	.reduce_freq = rkvdec_reduce_freq,
	.reset = rkvdec_reset,
};

static struct mpp_hw_ops rkvdec_3368_hw_ops = {
	.init = rkvdec_init,
	.clk_on = rkvdec_clk_on,
	.clk_off = rkvdec_clk_off,
	.get_freq = rkvdec_get_freq,
	.set_freq = rkvdec_3368_set_freq,
	.reduce_freq = rkvdec_reduce_freq,
	.reset = rkvdec_reset,
	.set_grf = rkvdec_3368_set_grf,
};

static struct mpp_dev_ops rkvdec_v1_dev_ops = {
	.alloc_task = rkvdec_alloc_task,
	.run = rkvdec_run,
	.irq = rkvdec_irq,
	.isr = rkvdec_isr,
	.finish = rkvdec_finish,
	.result = rkvdec_result,
	.free_task = rkvdec_free_task,
};

static struct mpp_hw_ops rkvdec_3328_hw_ops = {
	.init = rkvdec_3328_init,
	.exit = rkvdec_3328_exit,
	.clk_on = rkvdec_clk_on,
	.clk_off = rkvdec_clk_off,
	.get_freq = rkvdec_3328_get_freq,
	.set_freq = rkvdec_3328_set_freq,
	.reduce_freq = rkvdec_3328_reduce_freq,
	.reset = rkvdec_sip_reset,
};

static struct mpp_dev_ops rkvdec_3328_dev_ops = {
	.alloc_task = rkvdec_alloc_task,
	.run = rkvdec_3328_run,
	.irq = rkvdec_irq,
	.isr = rkvdec_3328_isr,
	.finish = rkvdec_finish,
	.result = rkvdec_result,
	.free_task = rkvdec_free_task,
};

static struct mpp_dev_ops rkvdec_3399_dev_ops = {
	.alloc_task = rkvdec_alloc_task,
	.prepare = rkvdec_prepare_with_reset,
	.run = rkvdec_run,
	.irq = rkvdec_irq,
	.isr = rkvdec_isr,
	.finish = rkvdec_finish_with_record_info,
	.result = rkvdec_result,
	.free_task = rkvdec_free_task,
};

static struct mpp_dev_ops rkvdec_1126_dev_ops = {
	.alloc_task = rkvdec_alloc_task,
	.run = rkvdec_1126_run,
	.irq = rkvdec_irq,
	.isr = rkvdec_isr,
	.finish = rkvdec_finish,
	.result = rkvdec_result,
	.free_task = rkvdec_free_task,
};

static const struct mpp_dev_var rk_hevcdec_data = {
	.device_type = MPP_DEVICE_HEVC_DEC,
	.hw_info = &rk_hevcdec_hw_info,
	.trans_info = rk_hevcdec_trans,
	.hw_ops = &rkvdec_v1_hw_ops,
	.dev_ops = &rkvdec_v1_dev_ops,
};

static const struct mpp_dev_var rk_hevcdec_3368_data = {
	.device_type = MPP_DEVICE_HEVC_DEC,
	.hw_info = &rk_hevcdec_hw_info,
	.trans_info = rk_hevcdec_trans,
	.hw_ops = &rkvdec_3368_hw_ops,
	.dev_ops = &rkvdec_v1_dev_ops,
};

static const struct mpp_dev_var rkvdec_v1_data = {
	.device_type = MPP_DEVICE_RKVDEC,
	.hw_info = &rkvdec_v1_hw_info,
	.trans_info = rkvdec_v1_trans,
	.hw_ops = &rkvdec_v1_hw_ops,
	.dev_ops = &rkvdec_v1_dev_ops,
};

static const struct mpp_dev_var rkvdec_3399_data = {
	.device_type = MPP_DEVICE_RKVDEC,
	.hw_info = &rkvdec_v1_hw_info,
	.trans_info = rkvdec_v1_trans,
	.hw_ops = &rkvdec_3399_hw_ops,
	.dev_ops = &rkvdec_3399_dev_ops,
};

static const struct mpp_dev_var rkvdec_3328_data = {
	.device_type = MPP_DEVICE_RKVDEC,
	.hw_info = &rkvdec_v1_hw_info,
	.trans_info = rkvdec_v1_trans,
	.hw_ops = &rkvdec_3328_hw_ops,
	.dev_ops = &rkvdec_3328_dev_ops,
};

static const struct mpp_dev_var rkvdec_1126_data = {
	.device_type = MPP_DEVICE_RKVDEC,
	.hw_info = &rkvdec_v1_hw_info,
	.trans_info = rkvdec_v1_trans,
	.hw_ops = &rkvdec_v1_hw_ops,
	.dev_ops = &rkvdec_1126_dev_ops,
};

static const struct of_device_id mpp_rkvdec_dt_match[] = {
	{
		.compatible = "rockchip,hevc-decoder",
		.data = &rk_hevcdec_data,
	},
#ifdef CONFIG_CPU_RK3368
	{
		.compatible = "rockchip,hevc-decoder-rk3368",
		.data = &rk_hevcdec_3368_data,
	},
#endif
	{
		.compatible = "rockchip,rkv-decoder-v1",
		.data = &rkvdec_v1_data,
	},
#ifdef CONFIG_CPU_RK3399
	{
		.compatible = "rockchip,rkv-decoder-rk3399",
		.data = &rkvdec_3399_data,
	},
#endif
#ifdef CONFIG_CPU_RK3328
	{
		.compatible = "rockchip,rkv-decoder-rk3328",
		.data = &rkvdec_3328_data,
	},
#endif
#ifdef CONFIG_CPU_RV1126
	{
		.compatible = "rockchip,rkv-decoder-rv1126",
		.data = &rkvdec_1126_data,
	},
#endif
	{},
};

static int rkvdec_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rkvdec_dev *dec = NULL;
	struct mpp_dev *mpp = NULL;
	const struct of_device_id *match = NULL;
	int ret = 0;

	dev_info(dev, "probing start\n");
	dec = devm_kzalloc(dev, sizeof(*dec), GFP_KERNEL);
	if (!dec)
		return -ENOMEM;

	mpp = &dec->mpp;
	platform_set_drvdata(pdev, mpp);

	if (pdev->dev.of_node) {
		match = of_match_node(mpp_rkvdec_dt_match,
				      pdev->dev.of_node);
		if (match)
			mpp->var = (struct mpp_dev_var *)match->data;
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret) {
		dev_err(dev, "probe sub driver failed\n");
		return ret;
	}

	ret = devm_request_threaded_irq(dev, mpp->irq,
					mpp_dev_irq,
					mpp_dev_isr_sched,
					IRQF_SHARED,
					dev_name(dev), mpp);
	if (ret) {
		dev_err(dev, "failed to register interrupt handler\n");
		return -EINVAL;
	}

	mpp->session_max_buffers = RKVDEC_SESSION_MAX_BUFFERS;
	rkvdec_procfs_init(mpp);
	/* register current device to the mpp service */
	mpp_dev_register_srv(mpp, mpp->srv);
	dev_info(dev, "probing finish\n");

	return 0;
}

static int rkvdec_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rkvdec_dev *dec = platform_get_drvdata(pdev);

	dev_info(dev, "remove device\n");
	mpp_dev_remove(&dec->mpp);
	rkvdec_procfs_remove(&dec->mpp);

	return 0;
}

struct platform_driver rockchip_rkvdec_driver = {
	.probe = rkvdec_probe,
	.remove = rkvdec_remove,
	.shutdown = mpp_dev_shutdown,
	.driver = {
		.name = RKVDEC_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_rkvdec_dt_match),
	},
};
EXPORT_SYMBOL(rockchip_rkvdec_driver);