• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3  * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
4  *
5  * author:
6  *	Alpha Lin, alpha.lin@rock-chips.com
7  *	Randy Li, randy.li@rock-chips.com
8  *	Ding Wei, leo.ding@rock-chips.com
9  *
10  */
11 #include <asm/cacheflush.h>
12 #include <linux/clk.h>
13 #include <linux/delay.h>
14 #include <linux/devfreq.h>
15 #include <linux/devfreq_cooling.h>
16 #include <linux/gfp.h>
17 #include <linux/interrupt.h>
18 #include <linux/iopoll.h>
19 #include <linux/module.h>
20 #include <linux/types.h>
21 #include <linux/of_platform.h>
22 #include <linux/slab.h>
23 #include <linux/uaccess.h>
24 #include <linux/regmap.h>
25 #include <linux/kernel.h>
26 #include <linux/thermal.h>
27 #include <linux/notifier.h>
28 #include <linux/proc_fs.h>
29 #include <linux/rockchip/rockchip_sip.h>
30 #include <linux/regulator/consumer.h>
31 
32 #include <soc/rockchip/pm_domains.h>
33 #include <soc/rockchip/rockchip_sip.h>
34 #include <soc/rockchip/rockchip_opp_select.h>
35 
36 #include "mpp_debug.h"
37 #include "mpp_common.h"
38 #include "mpp_iommu.h"
39 
40 #include "hack/mpp_hack_px30.h"
41 
42 #define RKVDEC_DRIVER_NAME		"mpp_rkvdec"
43 
44 #define IOMMU_GET_BUS_ID(x)		(((x) >> 6) & 0x1f)
45 #define IOMMU_PAGE_SIZE			SZ_4K
46 
47 #define	RKVDEC_SESSION_MAX_BUFFERS	40
48 /* The maximum registers number of all the version */
49 #define HEVC_DEC_REG_NUM		68
50 #define HEVC_DEC_REG_HW_ID_INDEX	0
51 #define HEVC_DEC_REG_START_INDEX	0
52 #define HEVC_DEC_REG_END_INDEX		67
53 
54 #define RKVDEC_V1_REG_NUM		78
55 #define RKVDEC_V1_REG_HW_ID_INDEX	0
56 #define RKVDEC_V1_REG_START_INDEX	0
57 #define RKVDEC_V1_REG_END_INDEX		77
58 
59 #define RKVDEC_V2_REG_NUM		109
60 #define RKVDEC_V2_REG_HW_ID_INDEX	0
61 #define RKVDEC_V2_REG_START_INDEX	0
62 #define RKVDEC_V2_REG_END_INDEX		108
63 
64 #define RKVDEC_REG_INT_EN		0x004
65 #define RKVDEC_REG_INT_EN_INDEX		(1)
66 #define RKVDEC_WR_DDR_ALIGN_EN		BIT(23)
67 #define RKVDEC_FORCE_SOFT_RESET_VALID	BIT(21)
68 #define RKVDEC_SOFTWARE_RESET_EN	BIT(20)
69 #define RKVDEC_INT_COLMV_REF_ERROR	BIT(17)
70 #define RKVDEC_INT_BUF_EMPTY		BIT(16)
71 #define RKVDEC_INT_TIMEOUT		BIT(15)
72 #define RKVDEC_INT_STRM_ERROR		BIT(14)
73 #define RKVDEC_INT_BUS_ERROR		BIT(13)
74 #define RKVDEC_DEC_INT_RAW		BIT(9)
75 #define RKVDEC_DEC_INT			BIT(8)
76 #define RKVDEC_DEC_TIMEOUT_EN		BIT(5)
77 #define RKVDEC_DEC_IRQ_DIS		BIT(4)
78 #define RKVDEC_CLOCK_GATE_EN		BIT(1)
79 #define RKVDEC_DEC_START		BIT(0)
80 
81 #define RKVDEC_REG_SYS_CTRL		0x008
82 #define RKVDEC_REG_SYS_CTRL_INDEX	(2)
83 #define RKVDEC_RGE_WIDTH_INDEX		(3)
84 #define RKVDEC_GET_FORMAT(x)		(((x) >> 20) & 0x3)
85 #define REVDEC_GET_PROD_NUM(x)		(((x) >> 16) & 0xffff)
86 #define RKVDEC_GET_WIDTH(x)		(((x) & 0x3ff) << 4)
87 #define RKVDEC_FMT_H265D		(0)
88 #define RKVDEC_FMT_H264D		(1)
89 #define RKVDEC_FMT_VP9D			(2)
90 
91 #define RKVDEC_REG_RLC_BASE		0x010
92 #define RKVDEC_REG_RLC_BASE_INDEX	(4)
93 
94 #define RKVDEC_RGE_YSTRDE_INDEX		(8)
95 #define RKVDEC_GET_YSTRDE(x)		(((x) & 0x1fffff) << 4)
96 
97 #define RKVDEC_REG_PPS_BASE		0x0a0
98 #define RKVDEC_REG_PPS_BASE_INDEX	(42)
99 
100 #define RKVDEC_REG_VP9_REFCOLMV_BASE		0x0d0
101 #define RKVDEC_REG_VP9_REFCOLMV_BASE_INDEX	(52)
102 
103 #define RKVDEC_REG_CACHE0_SIZE_BASE	0x41c
104 #define RKVDEC_REG_CACHE1_SIZE_BASE	0x45c
105 #define RKVDEC_REG_CLR_CACHE0_BASE	0x410
106 #define RKVDEC_REG_CLR_CACHE1_BASE	0x450
107 
108 #define RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS	BIT(0)
109 #define RKVDEC_CACHE_PERMIT_READ_ALLOCATE	BIT(1)
110 #define RKVDEC_CACHE_LINE_SIZE_64_BYTES		BIT(4)
111 
112 #define RKVDEC_POWER_CTL_INDEX		(99)
113 #define RKVDEC_POWER_CTL_BASE		0x018c
114 
115 #define FALLBACK_STATIC_TEMPERATURE	55000
116 
117 #define to_rkvdec_task(task)		\
118 		container_of(task, struct rkvdec_task, mpp_task)
119 #define to_rkvdec_dev(dev)		\
120 		container_of(dev, struct rkvdec_dev, mpp)
121 
122 enum RKVDEC_MODE {
123 	RKVDEC_MODE_NONE,
124 	RKVDEC_MODE_ONEFRAME,
125 	RKVDEC_MODE_BUTT
126 };
127 
128 enum SET_CLK_EVENT {
129 	EVENT_POWER_ON = 0,
130 	EVENT_POWER_OFF,
131 	EVENT_ADJUST,
132 	EVENT_THERMAL,
133 	EVENT_BUTT,
134 };
135 
136 struct rkvdec_task {
137 	struct mpp_task mpp_task;
138 
139 	enum RKVDEC_MODE link_mode;
140 	enum MPP_CLOCK_MODE clk_mode;
141 	u32 reg[RKVDEC_V2_REG_NUM];
142 	struct reg_offset_info off_inf;
143 
144 	u32 strm_addr;
145 	u32 irq_status;
146 	/* req for current task */
147 	u32 w_req_cnt;
148 	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
149 	u32 r_req_cnt;
150 	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
151 	/* ystride info */
152 	u32 pixels;
153 };
154 
155 struct rkvdec_dev {
156 	struct mpp_dev mpp;
157 	/* sip smc reset lock */
158 	struct mutex sip_reset_lock;
159 
160 	struct mpp_clk_info aclk_info;
161 	struct mpp_clk_info hclk_info;
162 	struct mpp_clk_info core_clk_info;
163 	struct mpp_clk_info cabac_clk_info;
164 	struct mpp_clk_info hevc_cabac_clk_info;
165 	u32 default_max_load;
166 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
167 	struct proc_dir_entry *procfs;
168 #endif
169 	struct reset_control *rst_a;
170 	struct reset_control *rst_h;
171 	struct reset_control *rst_niu_a;
172 	struct reset_control *rst_niu_h;
173 	struct reset_control *rst_core;
174 	struct reset_control *rst_cabac;
175 	struct reset_control *rst_hevc_cabac;
176 
177 	unsigned long aux_iova;
178 	struct page *aux_page;
179 #ifdef CONFIG_PM_DEVFREQ
180 	struct regulator *vdd;
181 	struct devfreq *devfreq;
182 	struct devfreq *parent_devfreq;
183 	struct notifier_block devfreq_nb;
184 	struct thermal_cooling_device *devfreq_cooling;
185 	struct thermal_zone_device *thermal_zone;
186 	u32 static_power_coeff;
187 	s32 ts[4];
188 	/* set clk lock */
189 	struct mutex set_clk_lock;
190 	unsigned int thermal_div;
191 	unsigned long volt;
192 	unsigned long devf_aclk_rate_hz;
193 	unsigned long devf_core_rate_hz;
194 	unsigned long devf_cabac_rate_hz;
195 #endif
196 	/* record last infos */
197 	u32 last_fmt;
198 	bool had_reset;
199 	bool grf_changed;
200 };
201 
202 /*
203  * hardware information
204  */
/* Register layout for the HEVC-only decoder variant (rk_hevcdec). */
static struct mpp_hw_info rk_hevcdec_hw_info = {
	.reg_num = HEVC_DEC_REG_NUM,
	.reg_id = HEVC_DEC_REG_HW_ID_INDEX,
	.reg_start = HEVC_DEC_REG_START_INDEX,
	.reg_end = HEVC_DEC_REG_END_INDEX,
	.reg_en = RKVDEC_REG_INT_EN_INDEX,
};

/* Register layout for the rkvdec v1 decoder (H.264/H.265/VP9). */
static struct mpp_hw_info rkvdec_v1_hw_info = {
	.reg_num = RKVDEC_V1_REG_NUM,
	.reg_id = RKVDEC_V1_REG_HW_ID_INDEX,
	.reg_start = RKVDEC_V1_REG_START_INDEX,
	.reg_end = RKVDEC_V1_REG_END_INDEX,
	.reg_en = RKVDEC_REG_INT_EN_INDEX,
};
220 
221 /*
222  * file handle translate information
223  */
/* Register indices subject to file-handle translation for H.264 decode. */
static const u16 trans_tbl_h264d[] = {
	4, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
	23, 24, 41, 42, 43, 48, 75
};

/* Register indices subject to file-handle translation for H.265 decode. */
static const u16 trans_tbl_h265d[] = {
	4, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
	23, 24, 42, 43
};

/* Register indices subject to file-handle translation for VP9 decode. */
static const u16 trans_tbl_vp9d[] = {
	4, 6, 7, 11, 12, 13, 14, 15, 16
};

/* Per-format translation tables for the HEVC-only variant. */
static struct mpp_trans_info rk_hevcdec_trans[] = {
	[RKVDEC_FMT_H265D] = {
		.count = ARRAY_SIZE(trans_tbl_h265d),
		.table = trans_tbl_h265d,
	},
};

/* Per-format translation tables for rkvdec v1, indexed by RKVDEC_FMT_*. */
static struct mpp_trans_info rkvdec_v1_trans[] = {
	[RKVDEC_FMT_H265D] = {
		.count = ARRAY_SIZE(trans_tbl_h265d),
		.table = trans_tbl_h265d,
	},
	[RKVDEC_FMT_H264D] = {
		.count = ARRAY_SIZE(trans_tbl_h264d),
		.table = trans_tbl_h264d,
	},
	[RKVDEC_FMT_VP9D] = {
		.count = ARRAY_SIZE(trans_tbl_vp9d),
		.table = trans_tbl_vp9d,
	},
};
259 
260 #ifdef CONFIG_PM_DEVFREQ
rkvdec_devf_set_clk(struct rkvdec_dev * dec,unsigned long aclk_rate_hz,unsigned long core_rate_hz,unsigned long cabac_rate_hz,unsigned int event)261 static int rkvdec_devf_set_clk(struct rkvdec_dev *dec,
262 			       unsigned long aclk_rate_hz,
263 			       unsigned long core_rate_hz,
264 			       unsigned long cabac_rate_hz,
265 			       unsigned int event)
266 {
267 	struct clk *aclk = dec->aclk_info.clk;
268 	struct clk *clk_core = dec->core_clk_info.clk;
269 	struct clk *clk_cabac = dec->cabac_clk_info.clk;
270 
271 	mutex_lock(&dec->set_clk_lock);
272 
273 	switch (event) {
274 	case EVENT_POWER_ON:
275 		clk_set_rate(aclk, dec->devf_aclk_rate_hz);
276 		clk_set_rate(clk_core, dec->devf_core_rate_hz);
277 		clk_set_rate(clk_cabac, dec->devf_cabac_rate_hz);
278 		dec->thermal_div = 0;
279 		break;
280 	case EVENT_POWER_OFF:
281 		clk_set_rate(aclk, aclk_rate_hz);
282 		clk_set_rate(clk_core, core_rate_hz);
283 		clk_set_rate(clk_cabac, cabac_rate_hz);
284 		dec->thermal_div = 0;
285 		break;
286 	case EVENT_ADJUST:
287 		if (!dec->thermal_div) {
288 			clk_set_rate(aclk, aclk_rate_hz);
289 			clk_set_rate(clk_core, core_rate_hz);
290 			clk_set_rate(clk_cabac, cabac_rate_hz);
291 		} else {
292 			clk_set_rate(aclk,
293 				     aclk_rate_hz / dec->thermal_div);
294 			clk_set_rate(clk_core,
295 				     core_rate_hz / dec->thermal_div);
296 			clk_set_rate(clk_cabac,
297 				     cabac_rate_hz / dec->thermal_div);
298 		}
299 		dec->devf_aclk_rate_hz = aclk_rate_hz;
300 		dec->devf_core_rate_hz = core_rate_hz;
301 		dec->devf_cabac_rate_hz = cabac_rate_hz;
302 		break;
303 	case EVENT_THERMAL:
304 		dec->thermal_div = dec->devf_aclk_rate_hz / aclk_rate_hz;
305 		if (dec->thermal_div > 4)
306 			dec->thermal_div = 4;
307 		if (dec->thermal_div) {
308 			clk_set_rate(aclk,
309 				     dec->devf_aclk_rate_hz / dec->thermal_div);
310 			clk_set_rate(clk_core,
311 				     dec->devf_core_rate_hz / dec->thermal_div);
312 			clk_set_rate(clk_cabac,
313 				     dec->devf_cabac_rate_hz / dec->thermal_div);
314 		}
315 		break;
316 	}
317 
318 	mutex_unlock(&dec->set_clk_lock);
319 
320 	return 0;
321 }
322 
/*
 * devfreq ->target callback: move the decoder to the OPP recommended
 * for *freq, adjusting clocks and the vdd regulator.
 *
 * DVFS ordering rule: raise the voltage before raising the frequency,
 * and lower the voltage only after lowering the frequency.
 */
static int devfreq_target(struct device *dev,
			  unsigned long *freq, u32 flags)
{
	int ret = 0;
	unsigned int clk_event;
	struct dev_pm_opp *opp;
	unsigned long target_volt, target_freq;
	unsigned long aclk_rate_hz, core_rate_hz, cabac_rate_hz;

	struct rkvdec_dev *dec = dev_get_drvdata(dev);
	struct devfreq *devfreq = dec->devfreq;
	struct devfreq_dev_status *stat = &devfreq->last_status;
	unsigned long old_clk_rate = stat->current_frequency;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp)) {
		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
		return PTR_ERR(opp);
	}
	target_freq = dev_pm_opp_get_freq(opp);
	target_volt = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);

	/*
	 * A recommendation below the request means we are being thermally
	 * throttled; otherwise pick power-on/off behavior depending on
	 * whether the device was busy in the last sampling period.
	 */
	if (target_freq < *freq) {
		clk_event = EVENT_THERMAL;
		aclk_rate_hz = target_freq;
		core_rate_hz = target_freq;
		cabac_rate_hz = target_freq;
	} else {
		clk_event = stat->busy_time ? EVENT_POWER_ON : EVENT_POWER_OFF;
		aclk_rate_hz = dec->devf_aclk_rate_hz;
		core_rate_hz = dec->devf_core_rate_hz;
		cabac_rate_hz = dec->devf_cabac_rate_hz;
	}

	/* Frequency unchanged: only the regulator may need realignment. */
	if (old_clk_rate == target_freq) {
		if (dec->volt == target_volt)
			return ret;
		ret = regulator_set_voltage(dec->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "Cannot set voltage %lu uV\n",
				target_volt);
			return ret;
		}
		dec->volt = target_volt;
		return 0;
	}

	/* Scaling up: voltage must be raised before the clocks. */
	if (old_clk_rate < target_freq) {
		ret = regulator_set_voltage(dec->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "set voltage %lu uV\n", target_volt);
			return ret;
		}
	}

	dev_dbg(dev, "%lu-->%lu\n", old_clk_rate, target_freq);
	rkvdec_devf_set_clk(dec, aclk_rate_hz, core_rate_hz, cabac_rate_hz, clk_event);
	stat->current_frequency = target_freq;

	/* Scaling down: voltage may only be dropped after the clocks. */
	if (old_clk_rate > target_freq) {
		ret = regulator_set_voltage(dec->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "set vol %lu uV\n", target_volt);
			return ret;
		}
	}
	dec->volt = target_volt;

	return ret;
}
394 
devfreq_get_cur_freq(struct device * dev,unsigned long * freq)395 static int devfreq_get_cur_freq(struct device *dev,
396 				unsigned long *freq)
397 {
398 	struct rkvdec_dev *dec = dev_get_drvdata(dev);
399 
400 	*freq = clk_get_rate(dec->aclk_info.clk);
401 
402 	return 0;
403 }
404 
devfreq_get_dev_status(struct device * dev,struct devfreq_dev_status * stat)405 static int devfreq_get_dev_status(struct device *dev,
406 				  struct devfreq_dev_status *stat)
407 {
408 	struct rkvdec_dev *dec = dev_get_drvdata(dev);
409 	struct devfreq *devfreq = dec->devfreq;
410 
411 	memcpy(stat, &devfreq->last_status, sizeof(*stat));
412 
413 	return 0;
414 }
415 
/* devfreq callbacks registered for the decoder device. */
static struct devfreq_dev_profile devfreq_profile = {
	.target	= devfreq_target,
	.get_cur_freq = devfreq_get_cur_freq,
	.get_dev_status	= devfreq_get_dev_status,
};
421 
422 static unsigned long
model_static_power(struct devfreq * devfreq,unsigned long voltage)423 model_static_power(struct devfreq *devfreq,
424 		   unsigned long voltage)
425 {
426 	struct device *dev = devfreq->dev.parent;
427 	struct rkvdec_dev *dec = dev_get_drvdata(dev);
428 	struct thermal_zone_device *tz = dec->thermal_zone;
429 
430 	int temperature;
431 	unsigned long temp;
432 	unsigned long temp_squared, temp_cubed, temp_scaling_factor;
433 	const unsigned long voltage_cubed = (voltage * voltage * voltage) >> 10;
434 
435 	if (!IS_ERR_OR_NULL(tz) && tz->ops->get_temp) {
436 		int ret;
437 
438 		ret = tz->ops->get_temp(tz, &temperature);
439 		if (ret) {
440 			dev_warn_ratelimited(dev, "ddr thermal zone failed\n");
441 			temperature = FALLBACK_STATIC_TEMPERATURE;
442 		}
443 	} else {
444 		temperature = FALLBACK_STATIC_TEMPERATURE;
445 	}
446 
447 	/*
448 	 * Calculate the temperature scaling factor. To be applied to the
449 	 * voltage scaled power.
450 	 */
451 	temp = temperature / 1000;
452 	temp_squared = temp * temp;
453 	temp_cubed = temp_squared * temp;
454 	temp_scaling_factor = (dec->ts[3] * temp_cubed)
455 	    + (dec->ts[2] * temp_squared) + (dec->ts[1] * temp) + dec->ts[0];
456 
457 	return (((dec->static_power_coeff * voltage_cubed) >> 20)
458 		* temp_scaling_factor) / 1000000;
459 }
460 
/*
 * Power model for the devfreq cooling device. dyn_power_coeff is a
 * default; it is overwritten from DT in power_model_simple_init().
 */
static struct devfreq_cooling_power cooling_power_data = {
	.get_static_power = model_static_power,
	.dyn_power_coeff = 120,
};
465 
power_model_simple_init(struct mpp_dev * mpp)466 static int power_model_simple_init(struct mpp_dev *mpp)
467 {
468 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
469 	struct device_node *np = mpp->dev->of_node;
470 
471 	u32 temp;
472 	const char *tz_name;
473 	struct device_node *power_model_node;
474 
475 	power_model_node = of_get_child_by_name(np, "vcodec_power_model");
476 	if (!power_model_node) {
477 		dev_err(mpp->dev, "could not find power_model node\n");
478 		return -ENODEV;
479 	}
480 
481 	if (of_property_read_string(power_model_node,
482 				    "thermal-zone",
483 				    &tz_name)) {
484 		dev_err(mpp->dev, "ts in power_model not available\n");
485 		return -EINVAL;
486 	}
487 
488 	dec->thermal_zone = thermal_zone_get_zone_by_name(tz_name);
489 	if (IS_ERR(dec->thermal_zone)) {
490 		pr_warn("Error getting ddr thermal zone, not yet ready?\n");
491 		dec->thermal_zone = NULL;
492 		return -EPROBE_DEFER;
493 	}
494 
495 	if (of_property_read_u32(power_model_node,
496 				 "static-power-coefficient",
497 				 &dec->static_power_coeff)) {
498 		dev_err(mpp->dev, "static-power-coefficient not available\n");
499 		return -EINVAL;
500 	}
501 	if (of_property_read_u32(power_model_node,
502 				 "dynamic-power-coefficient",
503 				 &temp)) {
504 		dev_err(mpp->dev, "dynamic-power-coefficient not available\n");
505 		return -EINVAL;
506 	}
507 	cooling_power_data.dyn_power_coeff = (unsigned long)temp;
508 
509 	if (of_property_read_u32_array(power_model_node,
510 				       "ts",
511 				       (u32 *)dec->ts,
512 				       4)) {
513 		dev_err(mpp->dev, "ts in power_model not available\n");
514 		return -EINVAL;
515 	}
516 
517 	return 0;
518 }
519 
devfreq_notifier_call(struct notifier_block * nb,unsigned long event,void * data)520 static int devfreq_notifier_call(struct notifier_block *nb,
521 				 unsigned long event,
522 				 void *data)
523 {
524 	struct rkvdec_dev *dec = container_of(nb,
525 					      struct rkvdec_dev,
526 					      devfreq_nb);
527 
528 	if (!dec)
529 		return NOTIFY_OK;
530 
531 	if (event == DEVFREQ_PRECHANGE)
532 		mutex_lock(&dec->sip_reset_lock);
533 	else if (event == DEVFREQ_POSTCHANGE)
534 		mutex_unlock(&dec->sip_reset_lock);
535 
536 	return NOTIFY_OK;
537 }
538 #endif
539 
540 /*
541  * NOTE: rkvdec/rkhevc put scaling list address in pps buffer hardware will read
542  * it by pps id in video stream data.
543  *
544  * So we need to translate the address in iommu case. The address data is also
545  * 10bit fd + 22bit offset mode.
546  * Because userspace decoder do not give the pps id in the register file sets
547  * kernel driver need to translate each scaling list address in pps buffer which
548  * means 256 pps for H.264, 64 pps for H.265.
549  *
550  * In order to optimize the performance kernel driver ask userspace decoder to
551  * set all scaling list address in pps buffer to the same one which will be used
552  * on current decoding task. Then kernel driver can only translate the first
553  * address then copy it all pps buffer.
554  */
fill_scaling_list_pps(struct rkvdec_task * task,int fd,int offset,int count,int pps_info_size,int sub_addr_offset)555 static int fill_scaling_list_pps(struct rkvdec_task *task,
556 				 int fd, int offset, int count,
557 				 int pps_info_size, int sub_addr_offset)
558 {
559 	struct dma_buf *dmabuf = NULL;
560 	void *vaddr = NULL;
561 	u8 *pps = NULL;
562 	u32 scaling_fd = 0;
563 	int ret = 0;
564 	u32 base = sub_addr_offset;
565 
566 	dmabuf = dma_buf_get(fd);
567 	if (IS_ERR_OR_NULL(dmabuf)) {
568 		mpp_err("invliad pps buffer\n");
569 		return -ENOENT;
570 	}
571 
572 	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
573 	if (ret) {
574 		mpp_err("can't access the pps buffer\n");
575 		goto done;
576 	}
577 
578 	vaddr = dma_buf_vmap(dmabuf);
579 	if (!vaddr) {
580 		mpp_err("can't access the pps buffer\n");
581 		ret = -EIO;
582 		goto done;
583 	}
584 	pps = vaddr + offset;
585 	/* NOTE: scaling buffer in pps, have no offset */
586 	memcpy(&scaling_fd, pps + base, sizeof(scaling_fd));
587 	scaling_fd = le32_to_cpu(scaling_fd);
588 	if (scaling_fd > 0) {
589 		struct mpp_mem_region *mem_region = NULL;
590 		u32 tmp = 0;
591 		int i = 0;
592 
593 		mem_region = mpp_task_attach_fd(&task->mpp_task,
594 						scaling_fd);
595 		if (IS_ERR(mem_region)) {
596 			ret = PTR_ERR(mem_region);
597 			goto done;
598 		}
599 
600 		tmp = mem_region->iova & 0xffffffff;
601 		tmp = cpu_to_le32(tmp);
602 		mpp_debug(DEBUG_PPS_FILL,
603 			  "pps at %p, scaling fd: %3d => %pad + offset %10d\n",
604 			  pps, scaling_fd, &mem_region->iova, offset);
605 
606 		/* Fill the scaling list address in each pps entries */
607 		for (i = 0; i < count; i++, base += pps_info_size)
608 			memcpy(pps + base, &tmp, sizeof(tmp));
609 	}
610 
611 done:
612 	dma_buf_vunmap(dmabuf, vaddr);
613 	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
614 	dma_buf_put(dmabuf);
615 
616 	return ret;
617 }
618 
rkvdec_process_scl_fd(struct mpp_session * session,struct rkvdec_task * task,struct mpp_task_msgs * msgs)619 static int rkvdec_process_scl_fd(struct mpp_session *session,
620 				 struct rkvdec_task *task,
621 				 struct mpp_task_msgs *msgs)
622 {
623 	int ret = 0;
624 	int pps_fd;
625 	u32 pps_offset;
626 	int idx = RKVDEC_REG_PPS_BASE_INDEX;
627 	u32 fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
628 
629 	if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
630 		pps_fd = task->reg[idx];
631 		pps_offset = 0;
632 	} else {
633 		pps_fd = task->reg[idx] & 0x3ff;
634 		pps_offset = task->reg[idx] >> 10;
635 	}
636 
637 	pps_offset += mpp_query_reg_offset_info(&task->off_inf, idx);
638 	if (pps_fd > 0) {
639 		int pps_info_offset;
640 		int pps_info_count;
641 		int pps_info_size;
642 		int scaling_list_addr_offset;
643 
644 		switch (fmt) {
645 		case RKVDEC_FMT_H264D:
646 			pps_info_offset = pps_offset;
647 			pps_info_count = 256;
648 			pps_info_size = 32;
649 			scaling_list_addr_offset = 23;
650 			break;
651 		case RKVDEC_FMT_H265D:
652 			pps_info_offset = pps_offset;
653 			pps_info_count = 64;
654 			pps_info_size = 80;
655 			scaling_list_addr_offset = 74;
656 			break;
657 		default:
658 			pps_info_offset = 0;
659 			pps_info_count = 0;
660 			pps_info_size = 0;
661 			scaling_list_addr_offset = 0;
662 			break;
663 		}
664 
665 		mpp_debug(DEBUG_PPS_FILL,
666 			  "scaling list filling parameter:\n");
667 		mpp_debug(DEBUG_PPS_FILL,
668 			  "pps_info_offset %d\n", pps_info_offset);
669 		mpp_debug(DEBUG_PPS_FILL,
670 			  "pps_info_count  %d\n", pps_info_count);
671 		mpp_debug(DEBUG_PPS_FILL,
672 			  "pps_info_size   %d\n", pps_info_size);
673 		mpp_debug(DEBUG_PPS_FILL,
674 			  "scaling_list_addr_offset %d\n",
675 			  scaling_list_addr_offset);
676 
677 		if (pps_info_count) {
678 			ret = fill_scaling_list_pps(task, pps_fd,
679 						    pps_info_offset,
680 						    pps_info_count,
681 						    pps_info_size,
682 						    scaling_list_addr_offset);
683 			if (ret) {
684 				mpp_err("fill pps failed\n");
685 				goto fail;
686 			}
687 		}
688 	}
689 
690 fail:
691 	return ret;
692 }
693 
rkvdec_process_reg_fd(struct mpp_session * session,struct rkvdec_task * task,struct mpp_task_msgs * msgs)694 static int rkvdec_process_reg_fd(struct mpp_session *session,
695 				 struct rkvdec_task *task,
696 				 struct mpp_task_msgs *msgs)
697 {
698 	int ret = 0;
699 	u32 fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
700 
701 	/*
702 	 * special offset scale case
703 	 *
704 	 * This translation is for fd + offset translation.
705 	 * One register has 32bits. We need to transfer both buffer file
706 	 * handle and the start address offset so we packet file handle
707 	 * and offset together using below format.
708 	 *
709 	 *  0~9  bit for buffer file handle range 0 ~ 1023
710 	 * 10~31 bit for offset range 0 ~ 4M
711 	 *
712 	 * But on 4K case the offset can be larger the 4M
713 	 * So on VP9 4K decoder colmv base we scale the offset by 16
714 	 */
715 	if (fmt == RKVDEC_FMT_VP9D) {
716 		int fd;
717 		u32 offset;
718 		dma_addr_t iova = 0;
719 		struct mpp_mem_region *mem_region = NULL;
720 		int idx = RKVDEC_REG_VP9_REFCOLMV_BASE_INDEX;
721 
722 		if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
723 			fd = task->reg[idx];
724 			offset = 0;
725 		} else {
726 			fd = task->reg[idx] & 0x3ff;
727 			offset = task->reg[idx] >> 10 << 4;
728 		}
729 		mem_region = mpp_task_attach_fd(&task->mpp_task, fd);
730 		if (IS_ERR(mem_region))
731 			return -EFAULT;
732 
733 		iova = mem_region->iova;
734 		task->reg[idx] = iova + offset;
735 	}
736 
737 	ret = mpp_translate_reg_address(session, &task->mpp_task,
738 					fmt, task->reg, &task->off_inf);
739 	if (ret)
740 		return ret;
741 
742 	mpp_translate_reg_offset_info(&task->mpp_task,
743 				      &task->off_inf, task->reg);
744 	return 0;
745 }
746 
/*
 * Split the user message array into the task's register write/read
 * request lists plus address-offset info.
 *
 * Write requests are copied into task->reg right away and remembered in
 * w_reqs (replayed to hardware in rkvdec_run); read requests are only
 * remembered in r_reqs (serviced after decoding in rkvdec_finish);
 * SET_REG_ADDR_OFFSET requests feed task->off_inf.
 */
static int rkvdec_extract_task_msg(struct rkvdec_task *task,
				   struct mpp_task_msgs *msgs)
{
	u32 i;
	int ret;
	struct mpp_request *req;
	struct mpp_hw_info *hw_info = task->mpp_task.hw_info;

	for (i = 0; i < msgs->req_cnt; i++) {
		u32 off_s, off_e;

		req = &msgs->reqs[i];
		if (!req->size)
			continue;

		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			/* NOTE: an out-of-range request is silently skipped */
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			if (copy_from_user((u8 *)task->reg + req->offset,
					   req->data, req->size)) {
				mpp_err("copy_from_user reg failed\n");
				return -EIO;
			}
			memcpy(&task->w_reqs[task->w_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_READ: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			/* defer the actual read until the task finishes */
			memcpy(&task->r_reqs[task->r_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
		  task->w_req_cnt, task->r_req_cnt);

	return 0;
}
800 
/*
 * Allocate and prepare one decoder task from a user message set.
 *
 * Pipeline: extract register requests, translate the scaling-list fd
 * inside the pps buffer (H.264/H.265), translate the remaining buffer
 * fds in the register file, then record the stream base address and
 * the pixel stride used for load tracking.
 *
 * Returns the embedded mpp_task on success, NULL on any failure (the
 * failure path dumps state and releases everything attached so far).
 */
static void *rkvdec_alloc_task(struct mpp_session *session,
			       struct mpp_task_msgs *msgs)
{
	int ret;
	struct mpp_task *mpp_task = NULL;
	struct rkvdec_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	mpp_task->hw_info = mpp->var->hw_info;
	mpp_task->reg = task->reg;
	/* extract reqs for current task */
	ret = rkvdec_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	/* process fd in pps for 264 and 265 */
	if (!(msgs->flags & MPP_FLAGS_SCL_FD_NO_TRANS)) {
		ret = rkvdec_process_scl_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
	/* process fd in register */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = rkvdec_process_reg_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
	task->strm_addr = task->reg[RKVDEC_REG_RLC_BASE_INDEX];
	task->link_mode = RKVDEC_MODE_ONEFRAME;
	task->clk_mode = CLK_MODE_NORMAL;

	/* get resolution info */
	task->pixels = RKVDEC_GET_YSTRDE(task->reg[RKVDEC_RGE_YSTRDE_INDEX]);
	mpp_debug(DEBUG_TASK_INFO, "ystride=%d\n", task->pixels);

	mpp_debug_leave();

	return mpp_task;

fail:
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	kfree(task);
	return NULL;
}
854 
rkvdec_prepare_with_reset(struct mpp_dev * mpp,struct mpp_task * mpp_task)855 static void *rkvdec_prepare_with_reset(struct mpp_dev *mpp,
856 				       struct mpp_task *mpp_task)
857 {
858 	unsigned long flags;
859 	struct mpp_task *out_task = NULL;
860 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
861 
862 	spin_lock_irqsave(&mpp->queue->running_lock, flags);
863 	out_task = list_empty(&mpp->queue->running_list) ? mpp_task : NULL;
864 	spin_unlock_irqrestore(&mpp->queue->running_lock, flags);
865 
866 	if (out_task && !dec->had_reset) {
867 		struct rkvdec_task *task = to_rkvdec_task(out_task);
868 		u32 fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
869 
870 		/* in 3399 3228 and 3229 chips, when 264 switch vp9,
871 		 * hardware will timeout, and can't recover problem.
872 		 * so reset it when 264 switch vp9, before hardware run.
873 		 */
874 		if (dec->last_fmt == RKVDEC_FMT_H264D && fmt == RKVDEC_FMT_VP9D) {
875 			mpp_power_on(mpp);
876 			mpp_dev_reset(mpp);
877 			mpp_power_off(mpp);
878 		}
879 	}
880 
881 	return out_task;
882 }
883 
/*
 * Program the hardware for one task and start decoding.
 *
 * Only RKVDEC_MODE_ONEFRAME is handled: configure and clear the read
 * caches, replay every user-supplied register write range, then write
 * the interrupt-enable register with RKVDEC_DEC_START to kick off the
 * frame. reg_en is passed to mpp_write_req so the common writer can
 * treat the enable register specially (its write here is what starts
 * the hardware).
 */
static int rkvdec_run(struct mpp_dev *mpp,
		      struct mpp_task *mpp_task)
{
	int i;
	u32 reg_en;
	struct rkvdec_task *task = NULL;

	mpp_debug_enter();

	task = to_rkvdec_task(mpp_task);
	reg_en = mpp_task->hw_info->reg_en;
	switch (task->link_mode) {
	case RKVDEC_MODE_ONEFRAME: {
		u32 reg;

		/* set cache size */
		reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS
			| RKVDEC_CACHE_PERMIT_READ_ALLOCATE;
		if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
			reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;

		mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
		mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
		/* clear cache */
		mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
		mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
		/* set registers for hardware */
		for (i = 0; i < task->w_req_cnt; i++) {
			int s, e;
			struct mpp_request *req = &task->w_reqs[i];

			s = req->offset / sizeof(u32);
			e = s + req->size / sizeof(u32);
			mpp_write_req(mpp, task->reg, s, e, reg_en);
		}
		/* init current task */
		mpp->cur_task = mpp_task;
		/* Flush the register before the start the device */
		wmb();
		mpp_write(mpp, RKVDEC_REG_INT_EN,
			  task->reg[reg_en] | RKVDEC_DEC_START);
	} break;
	default:
		break;
	}

	mpp_debug_leave();

	return 0;
}
934 
rkvdec_3328_run(struct mpp_dev * mpp,struct mpp_task * mpp_task)935 static int rkvdec_3328_run(struct mpp_dev *mpp,
936 			   struct mpp_task *mpp_task)
937 {
938 	u32 fmt = 0;
939 	u32 cfg = 0;
940 	struct rkvdec_task *task = NULL;
941 
942 	mpp_debug_enter();
943 
944 	task = to_rkvdec_task(mpp_task);
945 
946 	/*
947 	 * HW defeat workaround: VP9 power save optimization cause decoding
948 	 * corruption, disable optimization here.
949 	 */
950 	fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
951 	if (fmt == RKVDEC_FMT_VP9D) {
952 		cfg = task->reg[RKVDEC_POWER_CTL_INDEX] | 0xFFFF;
953 		task->reg[RKVDEC_POWER_CTL_INDEX] = cfg & (~(1 << 12));
954 		mpp_write_relaxed(mpp, RKVDEC_POWER_CTL_BASE,
955 				  task->reg[RKVDEC_POWER_CTL_INDEX]);
956 	}
957 
958 	rkvdec_run(mpp, mpp_task);
959 
960 	mpp_debug_leave();
961 
962 	return 0;
963 }
964 
rkvdec_1126_run(struct mpp_dev * mpp,struct mpp_task * mpp_task)965 static int rkvdec_1126_run(struct mpp_dev *mpp, struct mpp_task *mpp_task)
966 {
967 	struct rkvdec_task *task = to_rkvdec_task(mpp_task);
968 
969 	if (task->link_mode == RKVDEC_MODE_ONEFRAME)
970 		mpp_iommu_flush_tlb(mpp->iommu_info);
971 
972 	return rkvdec_run(mpp, mpp_task);
973 }
974 
rkvdec_irq(struct mpp_dev * mpp)975 static int rkvdec_irq(struct mpp_dev *mpp)
976 {
977 	mpp->irq_status = mpp_read(mpp, RKVDEC_REG_INT_EN);
978 	if (!(mpp->irq_status & RKVDEC_DEC_INT_RAW))
979 		return IRQ_NONE;
980 
981 	mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
982 
983 	return IRQ_WAKE_THREAD;
984 }
985 
/*
 * Threaded interrupt handler: finish the current one-frame task and
 * request a device reset when any error bit is set in the status
 * latched by rkvdec_irq().
 */
static int rkvdec_isr(struct mpp_dev *mpp)
{
	u32 err_mask;
	struct rkvdec_task *task = NULL;
	struct mpp_task *mpp_task = mpp->cur_task;

	mpp_debug_enter();
	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		goto done;
	}
	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_rkvdec_task(mpp_task);
	/* status was latched in rkvdec_irq() before being cleared */
	task->irq_status = mpp->irq_status;
	switch (task->link_mode) {
	case RKVDEC_MODE_ONEFRAME: {
		mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);

		err_mask = RKVDEC_INT_BUF_EMPTY
			| RKVDEC_INT_BUS_ERROR
			| RKVDEC_INT_COLMV_REF_ERROR
			| RKVDEC_INT_STRM_ERROR
			| RKVDEC_INT_TIMEOUT;

		/* any error bit asks the core layer to reset the hardware */
		if (err_mask & task->irq_status)
			atomic_inc(&mpp->reset_request);

		mpp_task_finish(mpp_task->session, mpp_task);
	} break;
	default:
		break;
	}
done:
	mpp_debug_leave();
	return IRQ_HANDLED;
}
1024 
/*
 * RK3328 threaded interrupt handler: same error handling as
 * rkvdec_isr(), plus teardown of the auxiliary IOMMU mapping
 * (aux_iova/aux_page) if one is live.
 *
 * NOTE(review): the code that establishes the aux mapping is not
 * visible in this chunk — presumably an IOMMU fault workaround;
 * confirm against the full driver.
 */
static int rkvdec_3328_isr(struct mpp_dev *mpp)
{
	u32 err_mask;
	struct rkvdec_task *task = NULL;
	struct mpp_task *mpp_task = mpp->cur_task;
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	mpp_debug_enter();
	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		goto done;
	}
	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_rkvdec_task(mpp_task);
	/* status was latched in the hard IRQ handler */
	task->irq_status = mpp->irq_status;
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);

	err_mask = RKVDEC_INT_BUF_EMPTY
		| RKVDEC_INT_BUS_ERROR
		| RKVDEC_INT_COLMV_REF_ERROR
		| RKVDEC_INT_STRM_ERROR
		| RKVDEC_INT_TIMEOUT;
	if (err_mask & task->irq_status)
		atomic_inc(&mpp->reset_request);

	/* unmap reserve buffer */
	if (dec->aux_iova != -1) {
		iommu_unmap(mpp->iommu_info->domain, dec->aux_iova, IOMMU_PAGE_SIZE);
		dec->aux_iova = -1;
	}

	mpp_task_finish(mpp_task->session, mpp_task);
done:
	mpp_debug_leave();
	return IRQ_HANDLED;
}
1063 
/*
 * Post-run read-back for one-frame mode.
 *
 * Copies the registers userspace asked to read back into task->reg,
 * then reverses the register "hacks" applied before the run:
 *  - the INT_EN slot is replaced by the captured interrupt status;
 *  - the RLC base slot is replaced by the consumed stream length
 *    (hardware read pointer minus the programmed stream address),
 *    shifted left by 10 to match the register's field layout
 *    (NOTE(review): shift amount taken from the run-time hack —
 *    confirm against the userspace contract).
 */
static int rkvdec_finish(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	u32 i;
	u32 dec_get;
	s32 dec_length;
	struct rkvdec_task *task = to_rkvdec_task(mpp_task);

	mpp_debug_enter();

	switch (task->link_mode) {
	case RKVDEC_MODE_ONEFRAME: {
		u32 s, e;
		struct mpp_request *req;

		/* read register after running */
		for (i = 0; i < task->r_req_cnt; i++) {
			req = &task->r_reqs[i];
			s = req->offset / sizeof(u32);
			e = s + req->size / sizeof(u32);
			mpp_read_req(mpp, task->reg, s, e);
		}
		/* revert hack for irq status */
		task->reg[RKVDEC_REG_INT_EN_INDEX] = task->irq_status;
		/* revert hack for decoded length */
		dec_get = mpp_read_relaxed(mpp, RKVDEC_REG_RLC_BASE);
		dec_length = dec_get - task->strm_addr;
		task->reg[RKVDEC_REG_RLC_BASE_INDEX] = dec_length << 10;
		mpp_debug(DEBUG_REGISTER,
			  "dec_get %08x dec_length %d\n", dec_get, dec_length);
	} break;
	default:
		break;
	}

	mpp_debug_leave();

	return 0;
}
1103 
rkvdec_finish_with_record_info(struct mpp_dev * mpp,struct mpp_task * mpp_task)1104 static int rkvdec_finish_with_record_info(struct mpp_dev *mpp,
1105 					  struct mpp_task *mpp_task)
1106 {
1107 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1108 	struct rkvdec_task *task = to_rkvdec_task(mpp_task);
1109 
1110 	rkvdec_finish(mpp, mpp_task);
1111 	dec->last_fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
1112 	dec->had_reset = (atomic_read(&mpp->reset_request) > 0) ? true : false;
1113 
1114 	return 0;
1115 }
1116 
rkvdec_result(struct mpp_dev * mpp,struct mpp_task * mpp_task,struct mpp_task_msgs * msgs)1117 static int rkvdec_result(struct mpp_dev *mpp,
1118 			 struct mpp_task *mpp_task,
1119 			 struct mpp_task_msgs *msgs)
1120 {
1121 	u32 i;
1122 	struct mpp_request *req;
1123 	struct rkvdec_task *task = to_rkvdec_task(mpp_task);
1124 
1125 	/* FIXME may overflow the kernel */
1126 	for (i = 0; i < task->r_req_cnt; i++) {
1127 		req = &task->r_reqs[i];
1128 
1129 		if (copy_to_user(req->data,
1130 				 (u8 *)task->reg + req->offset,
1131 				 req->size)) {
1132 			mpp_err("copy_to_user reg fail\n");
1133 			return -EIO;
1134 		}
1135 	}
1136 
1137 	return 0;
1138 }
1139 
/* Release a task: drop core-held references, then free the container. */
static int rkvdec_free_task(struct mpp_session *session,
			    struct mpp_task *mpp_task)
{
	mpp_task_finalize(session, mpp_task);
	kfree(to_rkvdec_task(mpp_task));

	return 0;
}
1150 
1151 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
rkvdec_procfs_remove(struct mpp_dev * mpp)1152 static int rkvdec_procfs_remove(struct mpp_dev *mpp)
1153 {
1154 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1155 
1156 	if (dec->procfs) {
1157 		proc_remove(dec->procfs);
1158 		dec->procfs = NULL;
1159 	}
1160 
1161 	return 0;
1162 }
1163 
rkvdec_procfs_init(struct mpp_dev * mpp)1164 static int rkvdec_procfs_init(struct mpp_dev *mpp)
1165 {
1166 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1167 
1168 	dec->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
1169 	if (IS_ERR_OR_NULL(dec->procfs)) {
1170 		mpp_err("failed on open procfs\n");
1171 		dec->procfs = NULL;
1172 		return -EIO;
1173 	}
1174 	mpp_procfs_create_u32("aclk", 0644,
1175 			      dec->procfs, &dec->aclk_info.debug_rate_hz);
1176 	mpp_procfs_create_u32("clk_core", 0644,
1177 			      dec->procfs, &dec->core_clk_info.debug_rate_hz);
1178 	mpp_procfs_create_u32("clk_cabac", 0644,
1179 			      dec->procfs, &dec->cabac_clk_info.debug_rate_hz);
1180 	mpp_procfs_create_u32("clk_hevc_cabac", 0644,
1181 			      dec->procfs, &dec->hevc_cabac_clk_info.debug_rate_hz);
1182 	mpp_procfs_create_u32("session_buffers", 0644,
1183 			      dec->procfs, &mpp->session_max_buffers);
1184 
1185 	return 0;
1186 }
1187 #else
/* procfs support disabled: both helpers collapse to no-ops. */
static inline int rkvdec_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int rkvdec_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}
1197 #endif
1198 
/*
 * Common device init shared by all variants.
 *
 * Looks up GRF info, clocks, the optional max-load property and the
 * reset controls from the device tree, and programs default clock
 * rates. Missing clocks/resets are logged but deliberately NOT fatal —
 * the helpers tolerate NULL handles — so this always returns 0.
 */
static int rkvdec_init(struct mpp_dev *mpp)
{
	int ret;
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	mutex_init(&dec->sip_reset_lock);
	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_RKVDEC];

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &dec->aclk_info, "aclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get aclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &dec->hclk_info, "hclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get hclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &dec->core_clk_info, "clk_core");
	if (ret)
		mpp_err("failed on clk_get clk_core\n");
	ret = mpp_get_clk_info(mpp, &dec->cabac_clk_info, "clk_cabac");
	if (ret)
		mpp_err("failed on clk_get clk_cabac\n");
	ret = mpp_get_clk_info(mpp, &dec->hevc_cabac_clk_info, "clk_hevc_cabac");
	if (ret)
		mpp_err("failed on clk_get clk_hevc_cabac\n");
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&dec->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);
	mpp_set_clk_info_rate_hz(&dec->core_clk_info, CLK_MODE_DEFAULT, 200 * MHZ);
	mpp_set_clk_info_rate_hz(&dec->cabac_clk_info, CLK_MODE_DEFAULT, 200 * MHZ);
	mpp_set_clk_info_rate_hz(&dec->hevc_cabac_clk_info, CLK_MODE_DEFAULT, 300 * MHZ);

	/* Get normal max workload from dtsi */
	of_property_read_u32(mpp->dev->of_node,
			     "rockchip,default-max-load", &dec->default_max_load);
	/* Get reset control from dtsi */
	dec->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
	if (!dec->rst_a)
		mpp_err("No aclk reset resource define\n");
	dec->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
	if (!dec->rst_h)
		mpp_err("No hclk reset resource define\n");
	dec->rst_niu_a = mpp_reset_control_get(mpp, RST_TYPE_NIU_A, "niu_a");
	if (!dec->rst_niu_a)
		mpp_err("No niu aclk reset resource define\n");
	dec->rst_niu_h = mpp_reset_control_get(mpp, RST_TYPE_NIU_H, "niu_h");
	if (!dec->rst_niu_h)
		mpp_err("No niu hclk reset resource define\n");
	dec->rst_core = mpp_reset_control_get(mpp, RST_TYPE_CORE, "video_core");
	if (!dec->rst_core)
		mpp_err("No core reset resource define\n");
	dec->rst_cabac = mpp_reset_control_get(mpp, RST_TYPE_CABAC, "video_cabac");
	if (!dec->rst_cabac)
		mpp_err("No cabac reset resource define\n");
	dec->rst_hevc_cabac = mpp_reset_control_get(mpp, RST_TYPE_HEVC_CABAC, "video_hevc_cabac");
	if (!dec->rst_hevc_cabac)
		mpp_err("No hevc cabac reset resource define\n");

	return 0;
}
1257 
/* px30 init: common init first, then the px30 combo-mode workaround. */
static int rkvdec_px30_init(struct mpp_dev *mpp)
{
	rkvdec_init(mpp);
	return px30_workaround_combo_init(mpp);
}
1263 
/*
 * IOMMU fault handler for rk3328.
 *
 * The rk322x HEVC decoder in tile mode pre-fetches colmv data past the
 * real buffer and faults on bus id 2. Instead of failing the decode,
 * map the faulting page onto a spare "auxiliary" page so the prefetch
 * read succeeds; the mapping is torn down in rkvdec_3328_isr().
 *
 * Returns 0 on success (fault absorbed), or the iommu_map() error.
 */
static int rkvdec_3328_iommu_hdl(struct iommu_domain *iommu,
				 struct device *iommu_dev,
				 unsigned long iova,
				 int status, void *arg)
{
	int ret = 0;
	struct mpp_dev *mpp = (struct mpp_dev *)arg;
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	/*
	 * defeat workaround, invalidate address generated when rk322x
	 * hevc decoder tile mode pre-fetch colmv data.
	 */
	if (IOMMU_GET_BUS_ID(status) == 2) {
		unsigned long page_iova = 0;
		/* avoid another page fault occur after page fault */
		if (dec->aux_iova != -1) {
			iommu_unmap(mpp->iommu_info->domain, dec->aux_iova, IOMMU_PAGE_SIZE);
			dec->aux_iova = -1;
		}

		page_iova = round_down(iova, IOMMU_PAGE_SIZE);
		ret = iommu_map(mpp->iommu_info->domain, page_iova,
				page_to_phys(dec->aux_page), IOMMU_PAGE_SIZE,
				IOMMU_READ | IOMMU_WRITE);
		if (!ret)
			dec->aux_iova = page_iova;
	}

	return ret;
}
1295 
1296 #ifdef CONFIG_PM_DEVFREQ
/* Undo rkvdec_devfreq_init(): drop the OPP notifier, then the OPP table. */
static int rkvdec_devfreq_remove(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	devfreq_unregister_opp_notifier(mpp->dev, dec->devfreq);
	dev_pm_opp_of_remove_table(mpp->dev);

	return 0;
}
1306 
/*
 * Set up devfreq-based DVFS for rk3328.
 *
 * Optional pieces degrade gracefully: a missing parent devfreq or
 * missing "vcodec" regulator is only fatal when it reports
 * -EPROBE_DEFER (resource exists but is not ready yet); otherwise the
 * function returns 0 and the device runs without DVFS/cooling.
 */
static int rkvdec_devfreq_init(struct mpp_dev *mpp)
{
	int ret = 0;
	struct devfreq_dev_status *stat;
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	mutex_init(&dec->set_clk_lock);
	/* Track the parent devfreq so our clocks follow its transitions. */
	dec->parent_devfreq = devfreq_get_devfreq_by_phandle(mpp->dev, "rkvdec_devfreq", 0);
	if (IS_ERR_OR_NULL(dec->parent_devfreq)) {
		if (PTR_ERR(dec->parent_devfreq) == -EPROBE_DEFER) {
			dev_warn(mpp->dev, "parent devfreq is not ready, retry\n");

			return -EPROBE_DEFER;
		}
	} else {
		dec->devfreq_nb.notifier_call = devfreq_notifier_call;
		devm_devfreq_register_notifier(mpp->dev,
					       dec->parent_devfreq,
					       &dec->devfreq_nb,
					       DEVFREQ_TRANSITION_NOTIFIER);
	}

	/* No regulator means no OPP scaling — run fixed-voltage, not an error. */
	dec->vdd = devm_regulator_get_optional(mpp->dev, "vcodec");
	if (IS_ERR_OR_NULL(dec->vdd)) {
		if (PTR_ERR(dec->vdd) == -EPROBE_DEFER) {
			dev_warn(mpp->dev, "vcodec regulator not ready, retry\n");

			return -EPROBE_DEFER;
		}
		dev_warn(mpp->dev, "no regulator for vcodec\n");

		return 0;
	}

	ret = rockchip_init_opp_table(mpp->dev, NULL,
				      "rkvdec_leakage", "vcodec");
	if (ret) {
		dev_err(mpp->dev, "Failed to init_opp_table\n");
		goto done;
	}
	dec->devfreq = devm_devfreq_add_device(mpp->dev, &devfreq_profile,
					       "userspace", NULL);
	if (IS_ERR(dec->devfreq)) {
		ret = PTR_ERR(dec->devfreq);
		goto done;
	}

	stat = &dec->devfreq->last_status;
	stat->current_frequency = clk_get_rate(dec->aclk_info.clk);

	ret = devfreq_register_opp_notifier(mpp->dev, dec->devfreq);
	if (ret)
		goto done;

	/* power model simple init (for devfreq cooling power estimation) */
	ret = power_model_simple_init(mpp);
	if (!ret && dec->devfreq) {
		dec->devfreq_cooling =
			of_devfreq_cooling_register_power(mpp->dev->of_node,
							  dec->devfreq,
							  &cooling_power_data);
		if (IS_ERR_OR_NULL(dec->devfreq_cooling)) {
			ret = -ENXIO;
			dev_err(mpp->dev, "Failed to register cooling\n");
			goto done;
		}
	}

done:
	return ret;
}
1378 #else
/* CONFIG_PM_DEVFREQ disabled: DVFS helpers collapse to no-ops. */
static inline int rkvdec_devfreq_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int rkvdec_devfreq_init(struct mpp_dev *mpp)
{
	return 0;
}
1388 #endif
1389 
rkvdec_3328_init(struct mpp_dev * mpp)1390 static int rkvdec_3328_init(struct mpp_dev *mpp)
1391 {
1392 	int ret = 0;
1393 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1394 
1395 	rkvdec_init(mpp);
1396 
1397 	/* warkaround for mmu pagefault */
1398 	dec->aux_page = alloc_page(GFP_KERNEL);
1399 	if (!dec->aux_page) {
1400 		dev_err(mpp->dev, "allocate a page for auxiliary usage\n");
1401 		ret = -ENOMEM;
1402 		goto done;
1403 	}
1404 	dec->aux_iova = -1;
1405 	mpp->iommu_info->hdl = rkvdec_3328_iommu_hdl;
1406 
1407 	ret = rkvdec_devfreq_init(mpp);
1408 done:
1409 	return ret;
1410 }
1411 
rkvdec_3328_exit(struct mpp_dev * mpp)1412 static int rkvdec_3328_exit(struct mpp_dev *mpp)
1413 {
1414 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1415 
1416 	if (dec->aux_page)
1417 		__free_page(dec->aux_page);
1418 
1419 	if (dec->aux_iova != -1) {
1420 		iommu_unmap(mpp->iommu_info->domain, dec->aux_iova, IOMMU_PAGE_SIZE);
1421 		dec->aux_iova = -1;
1422 	}
1423 	rkvdec_devfreq_remove(mpp);
1424 
1425 	return 0;
1426 }
1427 
/* Enable all decoder clocks (bus, AHB, core, CABAC, HEVC-CABAC). */
static int rkvdec_clk_on(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	/* mpp_clk_safe_enable tolerates NULL clocks (see rkvdec_init). */
	mpp_clk_safe_enable(dec->aclk_info.clk);
	mpp_clk_safe_enable(dec->hclk_info.clk);
	mpp_clk_safe_enable(dec->core_clk_info.clk);
	mpp_clk_safe_enable(dec->cabac_clk_info.clk);
	mpp_clk_safe_enable(dec->hevc_cabac_clk_info.clk);

	return 0;
}
1440 
/* Disable all decoder clocks; mirrors rkvdec_clk_on(). */
static int rkvdec_clk_off(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	clk_disable_unprepare(dec->aclk_info.clk);
	clk_disable_unprepare(dec->hclk_info.clk);
	clk_disable_unprepare(dec->core_clk_info.clk);
	clk_disable_unprepare(dec->cabac_clk_info.clk);
	clk_disable_unprepare(dec->hevc_cabac_clk_info.clk);

	return 0;
}
1453 
rkvdec_get_freq(struct mpp_dev * mpp,struct mpp_task * mpp_task)1454 static int rkvdec_get_freq(struct mpp_dev *mpp,
1455 			   struct mpp_task *mpp_task)
1456 {
1457 	u32 task_cnt;
1458 	u32 workload;
1459 	struct mpp_task *loop = NULL, *n;
1460 	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
1461 	struct rkvdec_task *task = to_rkvdec_task(mpp_task);
1462 
1463 	/* if not set max load, consider not have advanced mode */
1464 	if (!dec->default_max_load || !task->pixels)
1465 		return 0;
1466 
1467 	task_cnt = 1;
1468 	workload = task->pixels;
1469 	/* calc workload in pending list */
1470 	mutex_lock(&mpp->queue->pending_lock);
1471 	list_for_each_entry_safe(loop, n,
1472 				 &mpp->queue->pending_list,
1473 				 queue_link) {
1474 		struct rkvdec_task *loop_task = to_rkvdec_task(loop);
1475 
1476 		task_cnt++;
1477 		workload += loop_task->pixels;
1478 	}
1479 	mutex_unlock(&mpp->queue->pending_lock);
1480 
1481 	if (workload > dec->default_max_load)
1482 		task->clk_mode = CLK_MODE_ADVANCED;
1483 
1484 	mpp_debug(DEBUG_TASK_INFO, "pending task %d, workload %d, clk_mode=%d\n",
1485 		  task_cnt, workload, task->clk_mode);
1486 
1487 	return 0;
1488 }
1489 
rkvdec_3328_get_freq(struct mpp_dev * mpp,struct mpp_task * mpp_task)1490 static int rkvdec_3328_get_freq(struct mpp_dev *mpp,
1491 				struct mpp_task *mpp_task)
1492 {
1493 	u32 fmt;
1494 	u32 ddr_align_en;
1495 	struct rkvdec_task *task =  to_rkvdec_task(mpp_task);
1496 
1497 	fmt = RKVDEC_GET_FORMAT(task->reg[RKVDEC_REG_SYS_CTRL_INDEX]);
1498 	ddr_align_en = task->reg[RKVDEC_REG_INT_EN_INDEX] & RKVDEC_WR_DDR_ALIGN_EN;
1499 	if (fmt == RKVDEC_FMT_H264D && ddr_align_en)
1500 		task->clk_mode = CLK_MODE_ADVANCED;
1501 	else
1502 		rkvdec_get_freq(mpp, mpp_task);
1503 
1504 	return 0;
1505 }
1506 
/*
 * rk3368 GRF hook: remember whether the GRF selection changed (so
 * rkvdec_3368_set_freq() can refresh the IOMMU once), then apply it.
 */
static int rkvdec_3368_set_grf(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	dec->grf_changed = mpp_grf_is_changed(mpp->grf_info);
	mpp_set_grf(mpp->grf_info);

	return 0;
}
1516 
/* Apply the task's clock mode to all rate-scaled decoder clocks. */
static int rkvdec_set_freq(struct mpp_dev *mpp,
			   struct mpp_task *mpp_task)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
	struct rkvdec_task *task =  to_rkvdec_task(mpp_task);

	/* hclk is intentionally not scaled here — only enabled/disabled. */
	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->core_clk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->cabac_clk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->hevc_cabac_clk_info, task->clk_mode);

	return 0;
}
1530 
/*
 * rk3368 variant of rkvdec_set_freq(): additionally refreshes the
 * IOMMU once after a GRF change recorded by rkvdec_3368_set_grf().
 */
static int rkvdec_3368_set_freq(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
	struct rkvdec_task *task =  to_rkvdec_task(mpp_task);

	/* if grf changed, need reset iommu for rk3368 */
	if (dec->grf_changed) {
		mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
		dec->grf_changed = false;
	}

	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->core_clk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->cabac_clk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->hevc_cabac_clk_info, task->clk_mode);

	return 0;
}
1549 
/*
 * rk3328 variant of set_freq: when devfreq is active, route the rate
 * change through rkvdec_devf_set_clk() (marking the device busy via
 * busy_time == total_time); otherwise set clock rates directly.
 */
static int rkvdec_3328_set_freq(struct mpp_dev *mpp,
				struct mpp_task *mpp_task)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);
	struct rkvdec_task *task =  to_rkvdec_task(mpp_task);

#ifdef CONFIG_PM_DEVFREQ
	if (dec->devfreq) {
		struct devfreq_dev_status *stat;
		unsigned long aclk_rate_hz, core_rate_hz, cabac_rate_hz;

		/* busy_time == total_time → report the device as fully busy */
		stat = &dec->devfreq->last_status;
		stat->busy_time = 1;
		stat->total_time = 1;
		aclk_rate_hz = mpp_get_clk_info_rate_hz(&dec->aclk_info,
							task->clk_mode);
		core_rate_hz = mpp_get_clk_info_rate_hz(&dec->core_clk_info,
							task->clk_mode);
		cabac_rate_hz = mpp_get_clk_info_rate_hz(&dec->cabac_clk_info,
							 task->clk_mode);
		rkvdec_devf_set_clk(dec, aclk_rate_hz,
				    core_rate_hz, cabac_rate_hz,
				    EVENT_ADJUST);
	}
#else
	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->core_clk_info, task->clk_mode);
	mpp_clk_set_rate(&dec->cabac_clk_info, task->clk_mode);
#endif

	return 0;
}
1582 
/* Drop all scaled clocks to their low-power (reduce) rates when idle. */
static int rkvdec_reduce_freq(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&dec->core_clk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_REDUCE);

	return 0;
}
1594 
/*
 * rk3328 variant of reduce_freq: with devfreq active, report the
 * device idle (busy_time 0) and push the reduce-mode rates through
 * rkvdec_devf_set_clk(); otherwise set clock rates directly.
 */
static int rkvdec_3328_reduce_freq(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

#ifdef CONFIG_PM_DEVFREQ
	if (dec->devfreq) {
		struct devfreq_dev_status *stat;
		unsigned long aclk_rate_hz, core_rate_hz, cabac_rate_hz;

		/* busy_time == 0 → report the device as idle */
		stat = &dec->devfreq->last_status;
		stat->busy_time = 0;
		stat->total_time = 1;
		aclk_rate_hz = mpp_get_clk_info_rate_hz(&dec->aclk_info,
							CLK_MODE_REDUCE);
		core_rate_hz = mpp_get_clk_info_rate_hz(&dec->core_clk_info,
							CLK_MODE_REDUCE);
		cabac_rate_hz = mpp_get_clk_info_rate_hz(&dec->cabac_clk_info,
							 CLK_MODE_REDUCE);
		rkvdec_devf_set_clk(dec, aclk_rate_hz,
				    core_rate_hz, cabac_rate_hz,
				    EVENT_ADJUST);
	}
#else
	mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&dec->core_clk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_REDUCE);
#endif

	return 0;
}
1625 
/*
 * Full soft reset of the decoder.
 *
 * Idles the power domain, asserts every available reset line (NIU
 * first, then bus/AHB, core and CABAC blocks), waits 5us, then
 * releases them and resumes the domain. The assert/deassert sequence
 * is hardware-mandated; do not reorder. Skipped entirely when the
 * basic aclk/hclk resets were not found in the DT.
 */
static int rkvdec_reset(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

	mpp_debug_enter();
	if (dec->rst_a && dec->rst_h) {
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(dec->rst_niu_a);
		mpp_safe_reset(dec->rst_niu_h);
		mpp_safe_reset(dec->rst_a);
		mpp_safe_reset(dec->rst_h);
		mpp_safe_reset(dec->rst_core);
		mpp_safe_reset(dec->rst_cabac);
		mpp_safe_reset(dec->rst_hevc_cabac);
		udelay(5);
		mpp_safe_unreset(dec->rst_niu_h);
		mpp_safe_unreset(dec->rst_niu_a);
		mpp_safe_unreset(dec->rst_a);
		mpp_safe_unreset(dec->rst_h);
		mpp_safe_unreset(dec->rst_core);
		mpp_safe_unreset(dec->rst_cabac);
		mpp_safe_unreset(dec->rst_hevc_cabac);
		mpp_pmu_idle_request(mpp, false);
	}
	mpp_debug_leave();

	return 0;
}
1654 
/*
 * rk3328 reset: delegate to ARM Trusted Firmware via SiP SMC when
 * available (serialized by sip_reset_lock), otherwise fall back to
 * the regular reset-line sequence.
 */
static int rkvdec_sip_reset(struct mpp_dev *mpp)
{
	struct rkvdec_dev *dec = to_rkvdec_dev(mpp);

/* The reset flow in arm trustzone firmware */
#if IS_ENABLED(CONFIG_ROCKCHIP_SIP)
	mutex_lock(&dec->sip_reset_lock);
	sip_smc_vpu_reset(0, 0, 0);
	mutex_unlock(&dec->sip_reset_lock);

	return 0;
#else
	return rkvdec_reset(mpp);
#endif
}
1670 
/* Generic v1 hardware ops: shared by plain HEVC/RKVDEC variants. */
static struct mpp_hw_ops rkvdec_v1_hw_ops = {
	.init = rkvdec_init,
	.clk_on = rkvdec_clk_on,
	.clk_off = rkvdec_clk_off,
	.get_freq = rkvdec_get_freq,
	.set_freq = rkvdec_set_freq,
	.reduce_freq = rkvdec_reduce_freq,
	.reset = rkvdec_reset,
};

/* px30: v1 ops plus the combo-mode GRF switching workaround. */
static struct mpp_hw_ops rkvdec_px30_hw_ops = {
	.init = rkvdec_px30_init,
	.clk_on = rkvdec_clk_on,
	.clk_off = rkvdec_clk_off,
	.get_freq = rkvdec_get_freq,
	.set_freq = rkvdec_set_freq,
	.reduce_freq = rkvdec_reduce_freq,
	.reset = rkvdec_reset,
	.set_grf = px30_workaround_combo_switch_grf,
};

/* rk3399: identical to v1 hardware ops (kept separate for clarity). */
static struct mpp_hw_ops rkvdec_3399_hw_ops = {
	.init = rkvdec_init,
	.clk_on = rkvdec_clk_on,
	.clk_off = rkvdec_clk_off,
	.get_freq = rkvdec_get_freq,
	.set_freq = rkvdec_set_freq,
	.reduce_freq = rkvdec_reduce_freq,
	.reset = rkvdec_reset,
};

/* rk3368: GRF tracking + IOMMU refresh folded into set_freq. */
static struct mpp_hw_ops rkvdec_3368_hw_ops = {
	.init = rkvdec_init,
	.clk_on = rkvdec_clk_on,
	.clk_off = rkvdec_clk_off,
	.get_freq = rkvdec_get_freq,
	.set_freq = rkvdec_3368_set_freq,
	.reduce_freq = rkvdec_reduce_freq,
	.reset = rkvdec_reset,
	.set_grf = rkvdec_3368_set_grf,
};

/* Generic v1 task ops. */
static struct mpp_dev_ops rkvdec_v1_dev_ops = {
	.alloc_task = rkvdec_alloc_task,
	.run = rkvdec_run,
	.irq = rkvdec_irq,
	.isr = rkvdec_isr,
	.finish = rkvdec_finish,
	.result = rkvdec_result,
	.free_task = rkvdec_free_task,
};

/* rk3328: devfreq-aware clocking, SiP reset, MMU-fault workaround. */
static struct mpp_hw_ops rkvdec_3328_hw_ops = {
	.init = rkvdec_3328_init,
	.exit = rkvdec_3328_exit,
	.clk_on = rkvdec_clk_on,
	.clk_off = rkvdec_clk_off,
	.get_freq = rkvdec_3328_get_freq,
	.set_freq = rkvdec_3328_set_freq,
	.reduce_freq = rkvdec_3328_reduce_freq,
	.reset = rkvdec_sip_reset,
};

static struct mpp_dev_ops rkvdec_3328_dev_ops = {
	.alloc_task = rkvdec_alloc_task,
	.run = rkvdec_3328_run,
	.irq = rkvdec_irq,
	.isr = rkvdec_3328_isr,
	.finish = rkvdec_finish,
	.result = rkvdec_result,
	.free_task = rkvdec_free_task,
};

/* rk3399: pre-run reset check, finish records last format/reset state. */
static struct mpp_dev_ops rkvdec_3399_dev_ops = {
	.alloc_task = rkvdec_alloc_task,
	.prepare = rkvdec_prepare_with_reset,
	.run = rkvdec_run,
	.irq = rkvdec_irq,
	.isr = rkvdec_isr,
	.finish = rkvdec_finish_with_record_info,
	.result = rkvdec_result,
	.free_task = rkvdec_free_task,
};

/* rv1126: v1 ops with a TLB flush before each one-frame run. */
static struct mpp_dev_ops rkvdec_1126_dev_ops = {
	.alloc_task = rkvdec_alloc_task,
	.run = rkvdec_1126_run,
	.irq = rkvdec_irq,
	.isr = rkvdec_isr,
	.finish = rkvdec_finish,
	.result = rkvdec_result,
	.free_task = rkvdec_free_task,
};
/* Per-compatible device descriptions: HW layout + ops pairing. */
static const struct mpp_dev_var rk_hevcdec_data = {
	.device_type = MPP_DEVICE_HEVC_DEC,
	.hw_info = &rk_hevcdec_hw_info,
	.trans_info = rk_hevcdec_trans,
	.hw_ops = &rkvdec_v1_hw_ops,
	.dev_ops = &rkvdec_v1_dev_ops,
};

static const struct mpp_dev_var rk_hevcdec_3368_data = {
	.device_type = MPP_DEVICE_HEVC_DEC,
	.hw_info = &rk_hevcdec_hw_info,
	.trans_info = rk_hevcdec_trans,
	.hw_ops = &rkvdec_3368_hw_ops,
	.dev_ops = &rkvdec_v1_dev_ops,
};

static const struct mpp_dev_var rk_hevcdec_px30_data = {
	.device_type = MPP_DEVICE_HEVC_DEC,
	.hw_info = &rk_hevcdec_hw_info,
	.trans_info = rk_hevcdec_trans,
	.hw_ops = &rkvdec_px30_hw_ops,
	.dev_ops = &rkvdec_v1_dev_ops,
};

static const struct mpp_dev_var rkvdec_v1_data = {
	.device_type = MPP_DEVICE_RKVDEC,
	.hw_info = &rkvdec_v1_hw_info,
	.trans_info = rkvdec_v1_trans,
	.hw_ops = &rkvdec_v1_hw_ops,
	.dev_ops = &rkvdec_v1_dev_ops,
};

static const struct mpp_dev_var rkvdec_3399_data = {
	.device_type = MPP_DEVICE_RKVDEC,
	.hw_info = &rkvdec_v1_hw_info,
	.trans_info = rkvdec_v1_trans,
	.hw_ops = &rkvdec_3399_hw_ops,
	.dev_ops = &rkvdec_3399_dev_ops,
};

static const struct mpp_dev_var rkvdec_3328_data = {
	.device_type = MPP_DEVICE_RKVDEC,
	.hw_info = &rkvdec_v1_hw_info,
	.trans_info = rkvdec_v1_trans,
	.hw_ops = &rkvdec_3328_hw_ops,
	.dev_ops = &rkvdec_3328_dev_ops,
};

static const struct mpp_dev_var rkvdec_1126_data = {
	.device_type = MPP_DEVICE_RKVDEC,
	.hw_info = &rkvdec_v1_hw_info,
	.trans_info = rkvdec_v1_trans,
	.hw_ops = &rkvdec_v1_hw_ops,
	.dev_ops = &rkvdec_1126_dev_ops,
};
1819 
/* DT match table; SoC-specific entries are compiled in per CPU config. */
static const struct of_device_id mpp_rkvdec_dt_match[] = {
	{
		.compatible = "rockchip,hevc-decoder",
		.data = &rk_hevcdec_data,
	},
#ifdef CONFIG_CPU_PX30
	{
		.compatible = "rockchip,hevc-decoder-px30",
		.data = &rk_hevcdec_px30_data,
	},
#endif
#ifdef CONFIG_CPU_RK3368
	{
		.compatible = "rockchip,hevc-decoder-rk3368",
		.data = &rk_hevcdec_3368_data,
	},
#endif
	{
		.compatible = "rockchip,rkv-decoder-v1",
		.data = &rkvdec_v1_data,
	},
#ifdef CONFIG_CPU_RK3399
	{
		.compatible = "rockchip,rkv-decoder-rk3399",
		.data = &rkvdec_3399_data,
	},
#endif
#ifdef CONFIG_CPU_RK3328
	{
		.compatible = "rockchip,rkv-decoder-rk3328",
		.data = &rkvdec_3328_data,
	},
#endif
#ifdef CONFIG_CPU_RV1126
	{
		.compatible = "rockchip,rkv-decoder-rv1126",
		.data = &rkvdec_1126_data,
	},
#endif
	{},
};
1861 
rkvdec_probe(struct platform_device * pdev)1862 static int rkvdec_probe(struct platform_device *pdev)
1863 {
1864 	struct device *dev = &pdev->dev;
1865 	struct rkvdec_dev *dec = NULL;
1866 	struct mpp_dev *mpp = NULL;
1867 	const struct of_device_id *match = NULL;
1868 	int ret = 0;
1869 
1870 	dev_info(dev, "probing start\n");
1871 	dec = devm_kzalloc(dev, sizeof(*dec), GFP_KERNEL);
1872 	if (!dec)
1873 		return -ENOMEM;
1874 
1875 	mpp = &dec->mpp;
1876 	platform_set_drvdata(pdev, dec);
1877 
1878 	if (pdev->dev.of_node) {
1879 		match = of_match_node(mpp_rkvdec_dt_match,
1880 				      pdev->dev.of_node);
1881 		if (match)
1882 			mpp->var = (struct mpp_dev_var *)match->data;
1883 	}
1884 
1885 	ret = mpp_dev_probe(mpp, pdev);
1886 	if (ret) {
1887 		dev_err(dev, "probe sub driver failed\n");
1888 		return ret;
1889 	}
1890 
1891 	ret = devm_request_threaded_irq(dev, mpp->irq,
1892 					mpp_dev_irq,
1893 					mpp_dev_isr_sched,
1894 					IRQF_SHARED,
1895 					dev_name(dev), mpp);
1896 	if (ret) {
1897 		dev_err(dev, "register interrupter runtime failed\n");
1898 		return -EINVAL;
1899 	}
1900 
1901 	mpp->session_max_buffers = RKVDEC_SESSION_MAX_BUFFERS;
1902 	rkvdec_procfs_init(mpp);
1903 	/* register current device to mpp service */
1904 	mpp_dev_register_srv(mpp, mpp->srv);
1905 	dev_info(dev, "probing finish\n");
1906 
1907 	return 0;
1908 }
1909 
/* Platform remove: unwind mpp core state, then the procfs entries. */
static int rkvdec_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rkvdec_dev *dec = platform_get_drvdata(pdev);

	dev_info(dev, "remove device\n");
	mpp_dev_remove(&dec->mpp);
	rkvdec_procfs_remove(&dec->mpp);

	return 0;
}
1921 
/*
 * Platform shutdown: flag the service as shutting down (so no new
 * tasks start), then poll until in-flight tasks drain, up to 200 ms
 * (20 ms poll interval). A timeout is logged but cannot be refused —
 * shutdown proceeds regardless.
 */
static void rkvdec_shutdown(struct platform_device *pdev)
{
	int ret;
	int val;
	struct device *dev = &pdev->dev;
	struct rkvdec_dev *dec = platform_get_drvdata(pdev);
	struct mpp_dev *mpp = &dec->mpp;

	dev_info(dev, "shutdown device\n");

	atomic_inc(&mpp->srv->shutdown_request);
	ret = readx_poll_timeout(atomic_read,
				 &mpp->task_count,
				 val, val == 0, 20000, 200000);
	if (ret == -ETIMEDOUT)
		dev_err(dev, "wait total running time out\n");
}
1939 
/* Exported driver object; registered by the mpp service core. */
struct platform_driver rockchip_rkvdec_driver = {
	.probe = rkvdec_probe,
	.remove = rkvdec_remove,
	.shutdown = rkvdec_shutdown,
	.driver = {
		.name = RKVDEC_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_rkvdec_dt_match),
	},
};
EXPORT_SYMBOL(rockchip_rkvdec_driver);
1950