// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include "vpu.h"
#include "vpu_defs.h"
#include "vpu_core.h"
#include "vpu_mbox.h"
#include "vpu_msgs.h"
#include "vpu_rpc.h"
#include "vpu_cmds.h"

void csr_writel(struct vpu_core *core, u32 reg, u32 val)
{
	writel(val, core->base + reg);
}

u32 csr_readl(struct vpu_core *core, u32 reg)
{
	return readl(core->base + reg);
}

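/*
 * Load the firmware image named in the core resources into the reserved
 * boot region, then notify the interface layer that the image is in place.
 */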
static int vpu_core_load_firmware(struct vpu_core *core)
{
	const struct firmware *pfw = NULL;
	int ret = 0;

	if (!core->fw.virt) {
		dev_err(core->dev, "firmware buffer is not ready\n");
		return -EINVAL;
	}

	ret = request_firmware(&pfw, core->res->fwname, core->dev);
	dev_dbg(core->dev, "request_firmware %s : %d\n", core->res->fwname, ret);
	if (ret) {
		dev_err(core->dev, "request firmware %s failed, ret = %d\n",
			core->res->fwname, ret);
		return ret;
	}

	if (core->fw.length < pfw->size) {
		dev_err(core->dev, "firmware buffer size want %zu, but %d\n",
			pfw->size, core->fw.length);
		ret = -EINVAL;
		goto exit;
	}

	memset(core->fw.virt, 0, core->fw.length);
	memcpy(core->fw.virt, pfw->data, pfw->size);
	core->fw.bytesused = pfw->size;
	ret = vpu_iface_on_firmware_loaded(core);
exit:
	release_firmware(pfw);
	pfw = NULL;

	return ret;
}

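/*
 * Runs once the firmware has booted: read back the firmware version,
 * work out how many instances this core can host (capped by the act
 * buffer size and the width of instance_mask) and mark the core active.
 */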
static int vpu_core_boot_done(struct vpu_core *core)
{
	u32 fw_version;

	fw_version = vpu_iface_get_version(core);
	dev_info(core->dev, "%s firmware version : %d.%d.%d\n",
		 vpu_core_type_desc(core->type),
		 (fw_version >> 16) & 0xff,
		 (fw_version >> 8) & 0xff,
		 fw_version & 0xff);
	core->supported_instance_count = vpu_iface_get_max_instance_count(core);
	if (core->res->act_size) {
		u32 count = core->act.length / core->res->act_size;

		core->supported_instance_count = min(core->supported_instance_count, count);
	}
	if (core->supported_instance_count >= BITS_PER_TYPE(core->instance_mask))
		core->supported_instance_count = BITS_PER_TYPE(core->instance_mask);
	core->fw_version = fw_version;
	vpu_core_set_state(core, VPU_CORE_ACTIVE);

	return 0;
}

static int vpu_core_wait_boot_done(struct vpu_core *core)
{
	int ret;

	ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
	if (!ret) {
		dev_err(core->dev, "boot timeout\n");
		return -EINVAL;
	}
	return vpu_core_boot_done(core);
}

static int vpu_core_boot(struct vpu_core *core, bool load)
{
	int ret;

	reinit_completion(&core->cmp);
	if (load) {
		ret = vpu_core_load_firmware(core);
		if (ret)
			return ret;
	}

	vpu_iface_boot_core(core);
	return vpu_core_wait_boot_done(core);
}

static int vpu_core_shutdown(struct vpu_core *core)
{
	return vpu_iface_shutdown_core(core);
}

static int vpu_core_restore(struct vpu_core *core)
{
	int ret;

	ret = vpu_core_sw_reset(core);
	if (ret)
		return ret;

	vpu_core_boot_done(core);
	return vpu_iface_restore_core(core);
}

static int __vpu_alloc_dma(struct device *dev, struct vpu_buffer *buf)
{
	gfp_t gfp = GFP_KERNEL | GFP_DMA32;

	if (!buf->length)
		return 0;

	buf->virt = dma_alloc_coherent(dev, buf->length, &buf->phys, gfp);
	if (!buf->virt)
		return -ENOMEM;

	buf->dev = dev;

	return 0;
}

void vpu_free_dma(struct vpu_buffer *buf)
{
	if (!buf->virt || !buf->dev)
		return;

	dma_free_coherent(buf->dev, buf->length, buf->virt, buf->phys);
	buf->virt = NULL;
	buf->phys = 0;
	buf->length = 0;
	buf->bytesused = 0;
	buf->dev = NULL;
}

int vpu_alloc_dma(struct vpu_core *core, struct vpu_buffer *buf)
{
	return __vpu_alloc_dma(core->dev, buf);
}

void vpu_core_set_state(struct vpu_core *core, enum vpu_core_state state)
{
	if (state != core->state)
		vpu_trace(core->dev, "vpu core state change from %d to %d\n", core->state, state);
	core->state = state;
	if (core->state == VPU_CORE_DEINIT)
		core->hang_mask = 0;
}

static void vpu_core_update_state(struct vpu_core *core)
{
	if (!vpu_iface_get_power_state(core)) {
		if (core->request_count)
			vpu_core_set_state(core, VPU_CORE_HANG);
		else
			vpu_core_set_state(core, VPU_CORE_DEINIT);

	} else if (core->state == VPU_CORE_ACTIVE && core->hang_mask) {
		vpu_core_set_state(core, VPU_CORE_HANG);
	}
}

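/*
 * Pick a core of the requested type: a core still in VPU_CORE_DEINIT is
 * preferred, otherwise the active core with the fewest outstanding
 * requests is chosen.
 */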
static struct vpu_core *vpu_core_find_proper_by_type(struct vpu_dev *vpu, u32 type)
{
	struct vpu_core *core = NULL;
	int request_count = INT_MAX;
	struct vpu_core *c;

	list_for_each_entry(c, &vpu->cores, list) {
		dev_dbg(c->dev, "instance_mask = 0x%lx, state = %d\n", c->instance_mask, c->state);
		if (c->type != type)
			continue;
		mutex_lock(&c->lock);
		vpu_core_update_state(c);
		mutex_unlock(&c->lock);
		if (c->state == VPU_CORE_DEINIT) {
			core = c;
			break;
		}
		if (c->state != VPU_CORE_ACTIVE)
			continue;
		if (c->request_count < request_count) {
			request_count = c->request_count;
			core = c;
		}
		if (!request_count)
			break;
	}

	return core;
}

static bool vpu_core_is_exist(struct vpu_dev *vpu, struct vpu_core *core)
{
	struct vpu_core *c;

	list_for_each_entry(c, &vpu->cores, list) {
		if (c == core)
			return true;
	}

	return false;
}

static void vpu_core_get_vpu(struct vpu_core *core)
{
	core->vpu->get_vpu(core->vpu);
	if (core->type == VPU_CORE_TYPE_ENC)
		core->vpu->get_enc(core->vpu);
	if (core->type == VPU_CORE_TYPE_DEC)
		core->vpu->get_dec(core->vpu);
}

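/*
 * Hook a freshly probed core up to the parent VPU device: allocate its
 * message workqueue and kfifo, then add it to the device core list.
 */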
static int vpu_core_register(struct device *dev, struct vpu_core *core)
{
	struct vpu_dev *vpu = dev_get_drvdata(dev);
	int ret = 0;

	dev_dbg(core->dev, "register core %s\n", vpu_core_type_desc(core->type));
	if (vpu_core_is_exist(vpu, core))
		return 0;

	core->workqueue = alloc_workqueue("vpu", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!core->workqueue) {
		dev_err(core->dev, "fail to alloc workqueue\n");
		return -ENOMEM;
	}
	INIT_WORK(&core->msg_work, vpu_msg_run_work);
	INIT_DELAYED_WORK(&core->msg_delayed_work, vpu_msg_delayed_work);
	core->msg_buffer_size = roundup_pow_of_two(VPU_MSG_BUFFER_SIZE);
	core->msg_buffer = vzalloc(core->msg_buffer_size);
	if (!core->msg_buffer) {
		dev_err(core->dev, "failed allocate buffer for fifo\n");
		ret = -ENOMEM;
		goto error;
	}
	ret = kfifo_init(&core->msg_fifo, core->msg_buffer, core->msg_buffer_size);
	if (ret) {
		dev_err(core->dev, "failed init kfifo\n");
		goto error;
	}

	list_add_tail(&core->list, &vpu->cores);
	vpu_core_get_vpu(core);

	return 0;
error:
	if (core->msg_buffer) {
		vfree(core->msg_buffer);
		core->msg_buffer = NULL;
	}
	if (core->workqueue) {
		destroy_workqueue(core->workqueue);
		core->workqueue = NULL;
	}
	return ret;
}

static void vpu_core_put_vpu(struct vpu_core *core)
{
	if (core->type == VPU_CORE_TYPE_ENC)
		core->vpu->put_enc(core->vpu);
	if (core->type == VPU_CORE_TYPE_DEC)
		core->vpu->put_dec(core->vpu);
	core->vpu->put_vpu(core->vpu);
}

static int vpu_core_unregister(struct device *dev, struct vpu_core *core)
{
	list_del_init(&core->list);

	vpu_core_put_vpu(core);
	core->vpu = NULL;
	vfree(core->msg_buffer);
	core->msg_buffer = NULL;

	if (core->workqueue) {
		cancel_work_sync(&core->msg_work);
		cancel_delayed_work_sync(&core->msg_delayed_work);
		destroy_workqueue(core->workqueue);
		core->workqueue = NULL;
	}

	return 0;
}

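/* Instance slots are tracked as bits in core->instance_mask. */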
static int vpu_core_acquire_instance(struct vpu_core *core)
{
	int id;

	id = ffz(core->instance_mask);
	if (id >= core->supported_instance_count)
		return -EINVAL;

	set_bit(id, &core->instance_mask);

	return id;
}

static void vpu_core_release_instance(struct vpu_core *core, int id)
{
	if (id < 0 || id >= core->supported_instance_count)
		return;

	clear_bit(id, &core->instance_mask);
}

struct vpu_inst *vpu_inst_get(struct vpu_inst *inst)
{
	if (!inst)
		return NULL;

	atomic_inc(&inst->ref_count);

	return inst;
}

void vpu_inst_put(struct vpu_inst *inst)
{
	if (!inst)
		return;
	if (atomic_dec_and_test(&inst->ref_count)) {
		if (inst->release)
			inst->release(inst);
	}
}

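/*
 * Reserve a core for a new instance. A core in VPU_CORE_DEINIT is booted
 * (or restored if the firmware is already powered) before use; the
 * reference is dropped again with vpu_release_core().
 */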
struct vpu_core *vpu_request_core(struct vpu_dev *vpu, enum vpu_core_type type)
{
	struct vpu_core *core = NULL;
	int ret;

	mutex_lock(&vpu->lock);

	core = vpu_core_find_proper_by_type(vpu, type);
	if (!core)
		goto exit;

	mutex_lock(&core->lock);
	pm_runtime_resume_and_get(core->dev);

	if (core->state == VPU_CORE_DEINIT) {
		if (vpu_iface_get_power_state(core))
			ret = vpu_core_restore(core);
		else
			ret = vpu_core_boot(core, true);
		if (ret) {
			pm_runtime_put_sync(core->dev);
			mutex_unlock(&core->lock);
			core = NULL;
			goto exit;
		}
	}

	core->request_count++;

	mutex_unlock(&core->lock);
exit:
	mutex_unlock(&vpu->lock);

	return core;
}

void vpu_release_core(struct vpu_core *core)
{
	if (!core)
		return;

	mutex_lock(&core->lock);
	pm_runtime_put_sync(core->dev);
	if (core->request_count)
		core->request_count--;
	mutex_unlock(&core->lock);
}

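/*
 * Bind an instance to a core: request a core if none is attached yet,
 * acquire an instance slot and, when the core provides per-instance
 * "act" memory, carve out this instance's share of it.
 */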
int vpu_inst_register(struct vpu_inst *inst)
{
	struct vpu_dev *vpu;
	struct vpu_core *core;
	int ret = 0;

	vpu = inst->vpu;
	core = inst->core;
	if (!core) {
		core = vpu_request_core(vpu, inst->type);
		if (!core) {
			dev_err(vpu->dev, "there is no vpu core for %s\n",
				vpu_core_type_desc(inst->type));
			return -EINVAL;
		}
		inst->core = core;
		inst->dev = get_device(core->dev);
	}

	mutex_lock(&core->lock);
	if (core->state != VPU_CORE_ACTIVE) {
		dev_err(core->dev, "vpu core is not active, state = %d\n", core->state);
		ret = -EINVAL;
		goto exit;
	}

	if (inst->id >= 0 && inst->id < core->supported_instance_count)
		goto exit;

	ret = vpu_core_acquire_instance(core);
	if (ret < 0)
		goto exit;

	vpu_trace(inst->dev, "[%d] %p\n", ret, inst);
	inst->id = ret;
	list_add_tail(&inst->list, &core->instances);
	ret = 0;
	if (core->res->act_size) {
		inst->act.phys = core->act.phys + core->res->act_size * inst->id;
		inst->act.virt = core->act.virt + core->res->act_size * inst->id;
		inst->act.length = core->res->act_size;
	}
	vpu_inst_create_dbgfs_file(inst);
exit:
	mutex_unlock(&core->lock);

	if (ret)
		dev_err(core->dev, "register instance fail\n");
	return ret;
}

int vpu_inst_unregister(struct vpu_inst *inst)
{
	struct vpu_core *core;

	if (!inst->core)
		return 0;

	core = inst->core;
	vpu_clear_request(inst);
	mutex_lock(&core->lock);
	if (inst->id >= 0 && inst->id < core->supported_instance_count) {
		vpu_inst_remove_dbgfs_file(inst);
		list_del_init(&inst->list);
		vpu_core_release_instance(core, inst->id);
		inst->id = VPU_INST_NULL_ID;
	}
	vpu_core_update_state(core);
	if (core->state == VPU_CORE_HANG && !core->instance_mask) {
		int err;

		dev_info(core->dev, "reset hang core\n");
		mutex_unlock(&core->lock);
		err = vpu_core_sw_reset(core);
		mutex_lock(&core->lock);
		if (!err) {
			vpu_core_set_state(core, VPU_CORE_ACTIVE);
			core->hang_mask = 0;
		}
	}
	mutex_unlock(&core->lock);

	return 0;
}

struct vpu_inst *vpu_core_find_instance(struct vpu_core *core, u32 index)
{
	struct vpu_inst *inst = NULL;
	struct vpu_inst *tmp;

	mutex_lock(&core->lock);
	if (index >= core->supported_instance_count || !test_bit(index, &core->instance_mask))
		goto exit;
	list_for_each_entry(tmp, &core->instances, list) {
		if (tmp->id == index) {
			inst = vpu_inst_get(tmp);
			break;
		}
	}
exit:
	mutex_unlock(&core->lock);

	return inst;
}

const struct vpu_core_resources *vpu_get_resource(struct vpu_inst *inst)
{
	struct vpu_dev *vpu;
	struct vpu_core *core = NULL;
	const struct vpu_core_resources *res = NULL;

	if (!inst || !inst->vpu)
		return NULL;

	if (inst->core && inst->core->res)
		return inst->core->res;

	vpu = inst->vpu;
	mutex_lock(&vpu->lock);
	list_for_each_entry(core, &vpu->cores, list) {
		if (core->type == inst->type) {
			res = core->res;
			break;
		}
	}
	mutex_unlock(&vpu->lock);

	return res;
}

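/*
 * Two memory-region phandles are required: the first holds the firmware
 * boot image, the second is split into the RPC area, the firmware log
 * buffer and the per-instance "act" memory.
 */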
static int vpu_core_parse_dt(struct vpu_core *core, struct device_node *np)
{
	struct device_node *node;
	struct resource res;
	int ret;

	if (of_count_phandle_with_args(np, "memory-region", NULL) < 2) {
		dev_err(core->dev, "need 2 memory-region for boot and rpc\n");
		return -ENODEV;
	}

	node = of_parse_phandle(np, "memory-region", 0);
	if (!node) {
		dev_err(core->dev, "boot-region of_parse_phandle error\n");
		return -ENODEV;
	}
	if (of_address_to_resource(node, 0, &res)) {
		dev_err(core->dev, "boot-region of_address_to_resource error\n");
		of_node_put(node);
		return -EINVAL;
	}
	core->fw.phys = res.start;
	core->fw.length = resource_size(&res);

	of_node_put(node);

	node = of_parse_phandle(np, "memory-region", 1);
	if (!node) {
		dev_err(core->dev, "rpc-region of_parse_phandle error\n");
		return -ENODEV;
	}
	if (of_address_to_resource(node, 0, &res)) {
		dev_err(core->dev, "rpc-region of_address_to_resource error\n");
		of_node_put(node);
		return -EINVAL;
	}
	core->rpc.phys = res.start;
	core->rpc.length = resource_size(&res);

	if (core->rpc.length < core->res->rpc_size + core->res->fwlog_size) {
		dev_err(core->dev, "the rpc-region <%pad, 0x%x> is not enough\n",
			&core->rpc.phys, core->rpc.length);
		of_node_put(node);
		return -EINVAL;
	}

	core->fw.virt = memremap(core->fw.phys, core->fw.length, MEMREMAP_WC);
	core->rpc.virt = memremap(core->rpc.phys, core->rpc.length, MEMREMAP_WC);
	memset(core->rpc.virt, 0, core->rpc.length);

	ret = vpu_iface_check_memory_region(core, core->rpc.phys, core->rpc.length);
	if (ret != VPU_CORE_MEMORY_UNCACHED) {
		dev_err(core->dev, "rpc region<%pad, 0x%x> isn't uncached\n",
			&core->rpc.phys, core->rpc.length);
		of_node_put(node);
		return -EINVAL;
	}

	core->log.phys = core->rpc.phys + core->res->rpc_size;
	core->log.virt = core->rpc.virt + core->res->rpc_size;
	core->log.length = core->res->fwlog_size;
	core->act.phys = core->log.phys + core->log.length;
	core->act.virt = core->log.virt + core->log.length;
	core->act.length = core->rpc.length - core->res->rpc_size - core->log.length;
	core->rpc.length = core->res->rpc_size;

	of_node_put(node);

	return 0;
}

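/*
 * Probe one encoder or decoder core: map its registers and reserved
 * memory, set up the mailbox and shared interface, then register the
 * core with the parent VPU device.
 */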
static int vpu_core_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vpu_core *core;
	struct vpu_dev *vpu = dev_get_drvdata(dev->parent);
	struct vpu_shared_addr *iface;
	u32 iface_data_size;
	int ret;

	dev_dbg(dev, "probe\n");
	if (!vpu)
		return -EINVAL;
	core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
	if (!core)
		return -ENOMEM;

	core->pdev = pdev;
	core->dev = dev;
	platform_set_drvdata(pdev, core);
	core->vpu = vpu;
	INIT_LIST_HEAD(&core->instances);
	mutex_init(&core->lock);
	mutex_init(&core->cmd_lock);
	init_completion(&core->cmp);
	init_waitqueue_head(&core->ack_wq);
	vpu_core_set_state(core, VPU_CORE_DEINIT);

	core->res = of_device_get_match_data(dev);
	if (!core->res)
		return -ENODEV;

	core->type = core->res->type;
	core->id = of_alias_get_id(dev->of_node, "vpu_core");
	if (core->id < 0) {
		dev_err(dev, "can't get vpu core id\n");
		return core->id;
	}
	dev_info(core->dev, "[%d] = %s\n", core->id, vpu_core_type_desc(core->type));
	ret = vpu_core_parse_dt(core, dev->of_node);
	if (ret)
		return ret;

	core->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(core->base))
		return PTR_ERR(core->base);

	if (!vpu_iface_check_codec(core)) {
		dev_err(core->dev, "is not supported\n");
		return -EINVAL;
	}

	ret = vpu_mbox_init(core);
	if (ret)
		return ret;

	iface = devm_kzalloc(dev, sizeof(*iface), GFP_KERNEL);
	if (!iface)
		return -ENOMEM;

	iface_data_size = vpu_iface_get_data_size(core);
	if (iface_data_size) {
		iface->priv = devm_kzalloc(dev, iface_data_size, GFP_KERNEL);
		if (!iface->priv)
			return -ENOMEM;
	}

	ret = vpu_iface_init(core, iface, &core->rpc, core->fw.phys);
	if (ret) {
		dev_err(core->dev, "init iface fail, ret = %d\n", ret);
		return ret;
	}

	vpu_iface_config_system(core, vpu->res->mreg_base, vpu->base);
	vpu_iface_set_log_buf(core, &core->log);

	pm_runtime_enable(dev);
	ret = pm_runtime_resume_and_get(dev);
	if (ret) {
		pm_runtime_put_noidle(dev);
		pm_runtime_set_suspended(dev);
		goto err_runtime_disable;
	}

	ret = vpu_core_register(dev->parent, core);
	if (ret)
		goto err_core_register;
	core->parent = dev->parent;

	pm_runtime_put_sync(dev);
	vpu_core_create_dbgfs_file(core);

	return 0;

err_core_register:
	pm_runtime_put_sync(dev);
err_runtime_disable:
	pm_runtime_disable(dev);

	return ret;
}

static int vpu_core_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vpu_core *core = platform_get_drvdata(pdev);
	int ret;

	vpu_core_remove_dbgfs_file(core);
	ret = pm_runtime_resume_and_get(dev);
	WARN_ON(ret < 0);

	vpu_core_shutdown(core);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	vpu_core_unregister(core->parent, core);
	memunmap(core->fw.virt);
	memunmap(core->rpc.virt);
	mutex_destroy(&core->lock);
	mutex_destroy(&core->cmd_lock);

	return 0;
}

static int __maybe_unused vpu_core_runtime_resume(struct device *dev)
{
	struct vpu_core *core = dev_get_drvdata(dev);

	return vpu_mbox_request(core);
}

static int __maybe_unused vpu_core_runtime_suspend(struct device *dev)
{
	struct vpu_core *core = dev_get_drvdata(dev);

	vpu_mbox_free(core);
	return 0;
}

static void vpu_core_cancel_work(struct vpu_core *core)
{
	struct vpu_inst *inst = NULL;

	cancel_work_sync(&core->msg_work);
	cancel_delayed_work_sync(&core->msg_delayed_work);

	mutex_lock(&core->lock);
	list_for_each_entry(inst, &core->instances, list)
		cancel_work_sync(&inst->msg_work);
	mutex_unlock(&core->lock);
}

static void vpu_core_resume_work(struct vpu_core *core)
{
	struct vpu_inst *inst = NULL;
	unsigned long delay = msecs_to_jiffies(10);

	queue_work(core->workqueue, &core->msg_work);
	queue_delayed_work(core->workqueue, &core->msg_delayed_work, delay);

	mutex_lock(&core->lock);
	list_for_each_entry(inst, &core->instances, list)
		queue_work(inst->workqueue, &inst->msg_work);
	mutex_unlock(&core->lock);
}

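/*
 * System sleep support: suspend snapshots the firmware state (when the
 * core is in use) and stops message processing; resume reboots or resets
 * the firmware and restarts the message work items.
 */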
static int __maybe_unused vpu_core_resume(struct device *dev)
{
	struct vpu_core *core = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&core->lock);
	pm_runtime_resume_and_get(dev);
	vpu_core_get_vpu(core);

	if (core->request_count) {
		if (!vpu_iface_get_power_state(core))
			ret = vpu_core_boot(core, false);
		else
			ret = vpu_core_sw_reset(core);
		if (ret) {
			dev_err(core->dev, "resume fail\n");
			vpu_core_set_state(core, VPU_CORE_HANG);
		}
	}
	vpu_core_update_state(core);
	pm_runtime_put_sync(dev);
	mutex_unlock(&core->lock);

	vpu_core_resume_work(core);
	return ret;
}

static int __maybe_unused vpu_core_suspend(struct device *dev)
{
	struct vpu_core *core = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&core->lock);
	if (core->request_count)
		ret = vpu_core_snapshot(core);
	mutex_unlock(&core->lock);
	if (ret)
		return ret;

	vpu_core_cancel_work(core);

	mutex_lock(&core->lock);
	vpu_core_put_vpu(core);
	mutex_unlock(&core->lock);
	return ret;
}

static const struct dev_pm_ops vpu_core_pm_ops = {
	SET_RUNTIME_PM_OPS(vpu_core_runtime_suspend, vpu_core_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(vpu_core_suspend, vpu_core_resume)
};

static struct vpu_core_resources imx8q_enc = {
	.type = VPU_CORE_TYPE_ENC,
	.fwname = "amphion/vpu/vpu_fw_imx8_enc.bin",
	.stride = 16,
	.max_width = 1920,
	.max_height = 1920,
	.min_width = 64,
	.min_height = 48,
	.step_width = 2,
	.step_height = 2,
	.rpc_size = 0x80000,
	.fwlog_size = 0x80000,
	.act_size = 0xc0000,
};

static struct vpu_core_resources imx8q_dec = {
	.type = VPU_CORE_TYPE_DEC,
	.fwname = "amphion/vpu/vpu_fw_imx8_dec.bin",
	.stride = 256,
	.max_width = 8188,
	.max_height = 8188,
	.min_width = 16,
	.min_height = 16,
	.step_width = 1,
	.step_height = 1,
	.rpc_size = 0x80000,
	.fwlog_size = 0x80000,
};

static const struct of_device_id vpu_core_dt_match[] = {
	{ .compatible = "nxp,imx8q-vpu-encoder", .data = &imx8q_enc },
	{ .compatible = "nxp,imx8q-vpu-decoder", .data = &imx8q_dec },
	{}
};
MODULE_DEVICE_TABLE(of, vpu_core_dt_match);

static struct platform_driver amphion_vpu_core_driver = {
	.probe = vpu_core_probe,
	.remove = vpu_core_remove,
	.driver = {
		.name = "amphion-vpu-core",
		.of_match_table = vpu_core_dt_match,
		.pm = &vpu_core_pm_ops,
	},
};

int __init vpu_core_driver_init(void)
{
	return platform_driver_register(&amphion_vpu_core_driver);
}

void __exit vpu_core_driver_exit(void)
{
	platform_driver_unregister(&amphion_vpu_core_driver);
}