// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics SA 2015
 * Authors: Yannick Fertre <yannick.fertre@st.com>
 *          Hugues Fruchet <hugues.fruchet@st.com>
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
#include <linux/seq_file.h>
#endif

#include "hva.h"
#include "hva-hw.h"

/* HVA register offsets */
#define HVA_HIF_REG_RST                 0x0100U
#define HVA_HIF_REG_RST_ACK             0x0104U
#define HVA_HIF_REG_MIF_CFG             0x0108U
#define HVA_HIF_REG_HEC_MIF_CFG         0x010CU
#define HVA_HIF_REG_CFL                 0x0110U
#define HVA_HIF_FIFO_CMD                0x0114U
#define HVA_HIF_FIFO_STS                0x0118U
#define HVA_HIF_REG_SFL                 0x011CU
#define HVA_HIF_REG_IT_ACK              0x0120U
#define HVA_HIF_REG_ERR_IT_ACK          0x0124U
#define HVA_HIF_REG_LMI_ERR             0x0128U
#define HVA_HIF_REG_EMI_ERR             0x012CU
#define HVA_HIF_REG_HEC_MIF_ERR         0x0130U
#define HVA_HIF_REG_HEC_STS             0x0134U
#define HVA_HIF_REG_HVC_STS             0x0138U
#define HVA_HIF_REG_HJE_STS             0x013CU
#define HVA_HIF_REG_CNT                 0x0140U
#define HVA_HIF_REG_HEC_CHKSYN_DIS      0x0144U
#define HVA_HIF_REG_CLK_GATING          0x0148U
#define HVA_HIF_REG_VERSION             0x014CU
#define HVA_HIF_REG_BSM                 0x0150U

/* define value for version id register (HVA_HIF_REG_VERSION) */
#define VERSION_ID_MASK	0x0000FFFF

/* define values for BSM register (HVA_HIF_REG_BSM) */
#define BSM_CFG_VAL1	0x0003F000
#define BSM_CFG_VAL2	0x003F0000

/* define values for memory interface register (HVA_HIF_REG_MIF_CFG) */
#define MIF_CFG_VAL1	0x04460446
#define MIF_CFG_VAL2	0x04460806
#define MIF_CFG_VAL3	0x00000000

/* define value for HEC memory interface register (HVA_HIF_REG_HEC_MIF_CFG) */
#define HEC_MIF_CFG_VAL	0x000000C4

/* bits definition for clock gating register (HVA_HIF_REG_CLK_GATING) */
#define CLK_GATING_HVC	BIT(0)
#define CLK_GATING_HEC	BIT(1)
#define CLK_GATING_HJE	BIT(2)

/* fixed HVA clock rate */
#define CLK_RATE		300000000

/* fixed autosuspend delay for runtime PM */
#define AUTOSUSPEND_DELAY_MS	3

/*
 * hw encode error values
 * NO_ERROR: Success, Task OK
 * H264_BITSTREAM_OVERSIZE: VECH264 Bitstream size > bitstream buffer
 * H264_FRAME_SKIPPED: VECH264 Frame skipped (refers to CPB Buffer Size)
 * H264_SLICE_LIMIT_SIZE: VECH264 MB > slice limit size
 * H264_MAX_SLICE_NUMBER: VECH264 max slice number reached
 * H264_SLICE_READY: VECH264 Slice ready
 * TASK_LIST_FULL: HVA/FPC task list full
 *		   (discard latest transform command)
 * UNKNOWN_COMMAND: Transform command not known by HVA/FPC
 * WRONG_CODEC_OR_RESOLUTION: Wrong Codec or Resolution Selection
 * NO_INT_COMPLETION: Time-out on interrupt completion
 * LMI_ERR: Local Memory Interface Error
 * EMI_ERR: External Memory Interface Error
 * HECMI_ERR: HEC Memory Interface Error
 */
enum hva_hw_error {
	NO_ERROR = 0x0,
	H264_BITSTREAM_OVERSIZE = 0x2,
	H264_FRAME_SKIPPED = 0x4,
	H264_SLICE_LIMIT_SIZE = 0x5,
	H264_MAX_SLICE_NUMBER = 0x7,
	H264_SLICE_READY = 0x8,
	TASK_LIST_FULL = 0xF0,
	UNKNOWN_COMMAND = 0xF1,
	WRONG_CODEC_OR_RESOLUTION = 0xF4,
	NO_INT_COMPLETION = 0x100,
	LMI_ERR = 0x101,
	EMI_ERR = 0x102,
	HECMI_ERR = 0x103,
};

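/*
 * Hard IRQ half of the status interrupt: latch the status and FIFO level
 * registers, acknowledge the interrupt at hardware level, then wake the
 * threaded handler below which decodes the status code.
 */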
static irqreturn_t hva_hw_its_interrupt(int irq, void *data)
{
	struct hva_dev *hva = data;

	/* read status registers */
	hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
	hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);

	/* acknowledge interrupt */
	writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t hva_hw_its_irq_thread(int irq, void *arg)
{
	struct hva_dev *hva = arg;
	struct device *dev = hva_to_dev(hva);
	u32 status = hva->sts_reg & 0xFF;
	u8 ctx_id = 0;
	struct hva_ctx *ctx = NULL;

	dev_dbg(dev, "%s     %s: status: 0x%02x fifo level: 0x%02x\n",
		HVA_PREFIX, __func__, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);

	/*
	 * status: task_id[31:16] client_id[15:8] status[7:0]
	 * the context identifier is retrieved from the client identifier
	 */
	ctx_id = (hva->sts_reg & 0xFF00) >> 8;
	if (ctx_id >= HVA_MAX_INSTANCES) {
		dev_err(dev, "%s     %s: bad context identifier: %d\n",
			HVA_PREFIX, __func__, ctx_id);
		goto out;
	}

	ctx = hva->instances[ctx_id];
	if (!ctx)
		goto out;

	switch (status) {
	case NO_ERROR:
		dev_dbg(dev, "%s     %s: no error\n",
			ctx->name, __func__);
		ctx->hw_err = false;
		break;
	case H264_SLICE_READY:
		dev_dbg(dev, "%s     %s: h264 slice ready\n",
			ctx->name, __func__);
		ctx->hw_err = false;
		break;
	case H264_FRAME_SKIPPED:
		dev_dbg(dev, "%s     %s: h264 frame skipped\n",
			ctx->name, __func__);
		ctx->hw_err = false;
		break;
	case H264_BITSTREAM_OVERSIZE:
		dev_err(dev, "%s     %s: h264 bitstream oversize\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case H264_SLICE_LIMIT_SIZE:
		dev_err(dev, "%s     %s: h264 slice limit size is reached\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case H264_MAX_SLICE_NUMBER:
		dev_err(dev, "%s     %s: h264 max slice number is reached\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case TASK_LIST_FULL:
		dev_err(dev, "%s     %s: task list full\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case UNKNOWN_COMMAND:
		dev_err(dev, "%s     %s: command not known\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case WRONG_CODEC_OR_RESOLUTION:
		dev_err(dev, "%s     %s: wrong codec or resolution\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	default:
		dev_err(dev, "%s     %s: status not recognized\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	}
out:
	complete(&hva->interrupt);

	return IRQ_HANDLED;
}

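/*
 * Hard IRQ half of the error interrupt: in addition to the status and FIFO
 * level, latch the LMI, EMI and HEC MIF error registers before waking the
 * threaded handler.
 */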
static irqreturn_t hva_hw_err_interrupt(int irq, void *data)
{
	struct hva_dev *hva = data;

	/* read status registers */
	hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
	hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);

	/* read error registers */
	hva->lmi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_LMI_ERR);
	hva->emi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_EMI_ERR);
	hva->hec_mif_err_reg = readl_relaxed(hva->regs +
					     HVA_HIF_REG_HEC_MIF_ERR);

	/* acknowledge interrupt */
	writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t hva_hw_err_irq_thread(int irq, void *arg)
{
	struct hva_dev *hva = arg;
	struct device *dev = hva_to_dev(hva);
	u8 ctx_id = 0;
	struct hva_ctx *ctx;

	dev_dbg(dev, "%s     status: 0x%02x fifo level: 0x%02x\n",
		HVA_PREFIX, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);

	/*
	 * status: task_id[31:16] client_id[15:8] status[7:0]
	 * the context identifier is retrieved from the client identifier
	 */
	ctx_id = (hva->sts_reg & 0xFF00) >> 8;
	if (ctx_id >= HVA_MAX_INSTANCES) {
		dev_err(dev, "%s     bad context identifier: %d\n", HVA_PREFIX,
			ctx_id);
		goto out;
	}

	ctx = hva->instances[ctx_id];
	if (!ctx)
		goto out;

	if (hva->lmi_err_reg) {
		dev_err(dev, "%s     local memory interface error: 0x%08x\n",
			ctx->name, hva->lmi_err_reg);
		ctx->hw_err = true;
	}

	if (hva->emi_err_reg) {
		dev_err(dev, "%s     external memory interface error: 0x%08x\n",
			ctx->name, hva->emi_err_reg);
		ctx->hw_err = true;
	}

	if (hva->hec_mif_err_reg) {
		dev_err(dev, "%s     hec memory interface error: 0x%08x\n",
			ctx->name, hva->hec_mif_err_reg);
		ctx->hw_err = true;
	}
out:
	complete(&hva->interrupt);

	return IRQ_HANDLED;
}

static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
{
	struct device *dev = hva_to_dev(hva);
	unsigned long int version;

	if (pm_runtime_get_sync(dev) < 0) {
		dev_err(dev, "%s     failed to get pm_runtime\n", HVA_PREFIX);
		pm_runtime_put_noidle(dev);
		mutex_unlock(&hva->protect_mutex);
		return -EFAULT;
	}

	version = readl_relaxed(hva->regs + HVA_HIF_REG_VERSION) &
				VERSION_ID_MASK;

	pm_runtime_put_autosuspend(dev);

	switch (version) {
	case HVA_VERSION_V400:
		dev_dbg(dev, "%s     IP hardware version 0x%lx\n",
			HVA_PREFIX, version);
		break;
	default:
		dev_err(dev, "%s     unknown IP hardware version 0x%lx\n",
			HVA_PREFIX, version);
		version = HVA_VERSION_UNKNOWN;
		break;
	}

	return version;
}

int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
{
	struct device *dev = &pdev->dev;
	struct resource *regs;
	struct resource *esram;
	int ret;

	WARN_ON(!hva);

	/* get memory for registers */
	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hva->regs = devm_ioremap_resource(dev, regs);
	if (IS_ERR(hva->regs)) {
		dev_err(dev, "%s     failed to get regs\n", HVA_PREFIX);
		return PTR_ERR(hva->regs);
	}

	/* get memory for esram */
	esram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!esram) {
		dev_err(dev, "%s     failed to get esram\n", HVA_PREFIX);
		return -ENODEV;
	}
	hva->esram_addr = esram->start;
	hva->esram_size = resource_size(esram);

	dev_info(dev, "%s     esram reserved for address: 0x%x size:%d\n",
		 HVA_PREFIX, hva->esram_addr, hva->esram_size);

	/* get clock resource */
	hva->clk = devm_clk_get(dev, "clk_hva");
	if (IS_ERR(hva->clk)) {
		dev_err(dev, "%s     failed to get clock\n", HVA_PREFIX);
		return PTR_ERR(hva->clk);
	}

	ret = clk_prepare(hva->clk);
	if (ret < 0) {
		dev_err(dev, "%s     failed to prepare clock\n", HVA_PREFIX);
		hva->clk = ERR_PTR(-EINVAL);
		return ret;
	}

	/* get status interrupt resource */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_clk;
	hva->irq_its = ret;

	ret = devm_request_threaded_irq(dev, hva->irq_its, hva_hw_its_interrupt,
					hva_hw_its_irq_thread,
					IRQF_ONESHOT,
					"hva_its_irq", hva);
	if (ret) {
		dev_err(dev, "%s     failed to install status IRQ 0x%x\n",
			HVA_PREFIX, hva->irq_its);
		goto err_clk;
	}
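	/*
	 * keep the IRQ masked: it is only enabled while a task is being
	 * processed in hva_hw_execute_task()
	 */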
	disable_irq(hva->irq_its);

	/* get error interrupt resource */
	ret = platform_get_irq(pdev, 1);
	if (ret < 0)
		goto err_clk;
	hva->irq_err = ret;

	ret = devm_request_threaded_irq(dev, hva->irq_err, hva_hw_err_interrupt,
					hva_hw_err_irq_thread,
					IRQF_ONESHOT,
					"hva_err_irq", hva);
	if (ret) {
		dev_err(dev, "%s     failed to install error IRQ 0x%x\n",
			HVA_PREFIX, hva->irq_err);
		goto err_clk;
	}
	disable_irq(hva->irq_err);

	/* initialise protection mutex */
	mutex_init(&hva->protect_mutex);

	/* initialise completion signal */
	init_completion(&hva->interrupt);

	/* initialise runtime power management */
	pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "%s     failed to set PM\n", HVA_PREFIX);
		goto err_pm;
	}

	/* check IP hardware version */
	hva->ip_version = hva_hw_get_ip_version(hva);

	if (hva->ip_version == HVA_VERSION_UNKNOWN) {
		ret = -EINVAL;
		goto err_pm;
	}

	dev_info(dev, "%s     found hva device (version 0x%lx)\n", HVA_PREFIX,
		 hva->ip_version);

	return 0;

err_pm:
	pm_runtime_put(dev);
err_clk:
	if (hva->clk)
		clk_unprepare(hva->clk);

	return ret;
}

void hva_hw_remove(struct hva_dev *hva)
{
	struct device *dev = hva_to_dev(hva);

	disable_irq(hva->irq_its);
	disable_irq(hva->irq_err);

	pm_runtime_put_autosuspend(dev);
	pm_runtime_disable(dev);
}

int hva_hw_runtime_suspend(struct device *dev)
{
	struct hva_dev *hva = dev_get_drvdata(dev);

	clk_disable_unprepare(hva->clk);

	return 0;
}

int hva_hw_runtime_resume(struct device *dev)
{
	struct hva_dev *hva = dev_get_drvdata(dev);

	if (clk_prepare_enable(hva->clk)) {
		dev_err(hva->dev, "%s     failed to prepare hva clk\n",
			HVA_PREFIX);
		return -EINVAL;
	}

	if (clk_set_rate(hva->clk, CLK_RATE)) {
		dev_err(dev, "%s     failed to set clock frequency\n",
			HVA_PREFIX);
		return -EINVAL;
	}

	return 0;
}

int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
			struct hva_buffer *task)
{
	struct hva_dev *hva = ctx_to_hdev(ctx);
	struct device *dev = hva_to_dev(hva);
	u8 client_id = ctx->id;
	int ret;
	u32 reg = 0;

	mutex_lock(&hva->protect_mutex);

	/* enable irqs */
	enable_irq(hva->irq_its);
	enable_irq(hva->irq_err);

	if (pm_runtime_get_sync(dev) < 0) {
		dev_err(dev, "%s     failed to get pm_runtime\n", ctx->name);
		ctx->sys_errors++;
		ret = -EFAULT;
		goto out;
	}

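	/*
	 * select the clock gating bit matching the requested codec for the
	 * duration of the task; the bit is cleared again once the task
	 * completes (see the out path below)
	 */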
	reg = readl_relaxed(hva->regs + HVA_HIF_REG_CLK_GATING);
	switch (cmd) {
	case H264_ENC:
		reg |= CLK_GATING_HVC;
		break;
	default:
		dev_dbg(dev, "%s     unknown command 0x%x\n", ctx->name, cmd);
		ctx->encode_errors++;
		ret = -EFAULT;
		goto out;
	}
	writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);

	dev_dbg(dev, "%s     %s: write configuration registers\n", ctx->name,
		__func__);

	/* byte swap config */
	writel_relaxed(BSM_CFG_VAL1, hva->regs + HVA_HIF_REG_BSM);

	/* define Max Opcode Size and Max Message Size for LMI and EMI */
	writel_relaxed(MIF_CFG_VAL3, hva->regs + HVA_HIF_REG_MIF_CFG);
	writel_relaxed(HEC_MIF_CFG_VAL, hva->regs + HVA_HIF_REG_HEC_MIF_CFG);

	/*
	 * command FIFO: task_id[31:16] client_id[15:8] command_type[7:0]
	 * the context identifier is provided as client identifier to the
	 * hardware, and is retrieved in the interrupt functions from the
	 * status register
	 */
	dev_dbg(dev, "%s     %s: send task (cmd: %d, task_desc: %pad)\n",
		ctx->name, __func__, cmd + (client_id << 8), &task->paddr);
	writel_relaxed(cmd + (client_id << 8), hva->regs + HVA_HIF_FIFO_CMD);
	writel_relaxed(task->paddr, hva->regs + HVA_HIF_FIFO_CMD);

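	/*
	 * wait for task completion: the completion is signalled by the status
	 * or error IRQ threads above, with a 2 second timeout
	 */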
	if (!wait_for_completion_timeout(&hva->interrupt,
					 msecs_to_jiffies(2000))) {
		dev_err(dev, "%s     %s: time out on completion\n", ctx->name,
			__func__);
		ctx->encode_errors++;
		ret = -EFAULT;
		goto out;
	}

	/* get encoding status */
	ret = ctx->hw_err ? -EFAULT : 0;

	ctx->encode_errors += ctx->hw_err ? 1 : 0;

out:
	disable_irq(hva->irq_its);
	disable_irq(hva->irq_err);

	switch (cmd) {
	case H264_ENC:
		reg &= ~CLK_GATING_HVC;
		writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);
		break;
	default:
		dev_dbg(dev, "%s     unknown command 0x%x\n", ctx->name, cmd);
	}

	pm_runtime_put_autosuspend(dev);
	mutex_unlock(&hva->protect_mutex);

	return ret;
}

#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
#define DUMP(reg) seq_printf(s, "%-30s: 0x%08X\n",\
			     #reg, readl_relaxed(hva->regs + reg))

void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
{
	struct device *dev = hva_to_dev(hva);

	mutex_lock(&hva->protect_mutex);

	if (pm_runtime_get_sync(dev) < 0) {
		seq_puts(s, "Cannot wake up IP\n");
		pm_runtime_put_noidle(dev);
		mutex_unlock(&hva->protect_mutex);
		return;
	}

	seq_printf(s, "Registers:\nReg @ = 0x%p\n", hva->regs);

	DUMP(HVA_HIF_REG_RST);
	DUMP(HVA_HIF_REG_RST_ACK);
	DUMP(HVA_HIF_REG_MIF_CFG);
	DUMP(HVA_HIF_REG_HEC_MIF_CFG);
	DUMP(HVA_HIF_REG_CFL);
	DUMP(HVA_HIF_REG_SFL);
	DUMP(HVA_HIF_REG_LMI_ERR);
	DUMP(HVA_HIF_REG_EMI_ERR);
	DUMP(HVA_HIF_REG_HEC_MIF_ERR);
	DUMP(HVA_HIF_REG_HEC_STS);
	DUMP(HVA_HIF_REG_HVC_STS);
	DUMP(HVA_HIF_REG_HJE_STS);
	DUMP(HVA_HIF_REG_CNT);
	DUMP(HVA_HIF_REG_HEC_CHKSYN_DIS);
	DUMP(HVA_HIF_REG_CLK_GATING);
	DUMP(HVA_HIF_REG_VERSION);

	pm_runtime_put_autosuspend(dev);
	mutex_unlock(&hva->protect_mutex);
}
#endif