// SPDX-License-Identifier: GPL-2.0
/*
 * Hantro VPU codec driver
 *
 * Copyright (C) 2018 Collabora, Ltd.
 * Copyright 2018 Google LLC.
 *	Tomasz Figa <tfiga@chromium.org>
 *
 * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/workqueue.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>

#include "hantro_v4l2.h"
#include "hantro.h"
#include "hantro_hw.h"

#define DRIVER_NAME "hantro-vpu"

int hantro_debug;
module_param_named(debug, hantro_debug, int, 0644);
MODULE_PARM_DESC(debug,
		 "Debug level - higher value produces more verbose messages");

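/* Return a pointer to the current payload of control @id, or NULL if the
 * control is not registered on this context.
 */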
void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id)
{
	struct v4l2_ctrl *ctrl;

	ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, id);
	return ctrl ? ctrl->p_cur.p : NULL;
}

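/*
 * Look up the capture buffer whose timestamp matches the reference @ts and
 * return its decoded-frame DMA address, or 0 if no such buffer exists.
 */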
dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
{
	struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
	struct vb2_buffer *buf;

	buf = vb2_find_buffer(q, ts);
	if (!buf)
		return 0;
	return hantro_get_dec_buf_addr(ctx, buf);
}

static const struct v4l2_event hantro_eos_event = {
	.type = V4L2_EVENT_EOS
};

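/*
 * Complete the current job without touching clocks or runtime PM: bump the
 * sequence counters, flag the last capture buffer and queue the EOS event
 * when draining, then return both buffers with the given state.
 */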
static void hantro_job_finish_no_pm(struct hantro_dev *vpu,
				    struct hantro_ctx *ctx,
				    enum vb2_buffer_state result)
{
	struct vb2_v4l2_buffer *src, *dst;

	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	if (WARN_ON(!src))
		return;
	if (WARN_ON(!dst))
		return;

	src->sequence = ctx->sequence_out++;
	dst->sequence = ctx->sequence_cap++;

	if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src)) {
		dst->flags |= V4L2_BUF_FLAG_LAST;
		v4l2_event_queue_fh(&ctx->fh, &hantro_eos_event);
		v4l2_m2m_mark_stopped(ctx->fh.m2m_ctx);
	}

	v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
					 result);
}

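/*
 * Drop the runtime PM reference and gate the clocks taken in device_run(),
 * then complete the job.
 */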
static void hantro_job_finish(struct hantro_dev *vpu,
			      struct hantro_ctx *ctx,
			      enum vb2_buffer_state result)
{
	pm_runtime_mark_last_busy(vpu->dev);
	pm_runtime_put_autosuspend(vpu->dev);

	clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);

	hantro_job_finish_no_pm(vpu, ctx, result);
}

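/* Called by the per-codec interrupt handlers once the hardware is done. */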
void hantro_irq_done(struct hantro_dev *vpu,
		     enum vb2_buffer_state result)
{
	struct hantro_ctx *ctx =
		v4l2_m2m_get_curr_priv(vpu->m2m_dev);

	/*
	 * If cancel_delayed_work() returns false, the timeout expired.
	 * The watchdog is running and will take care of finishing the job.
	 */
	if (cancel_delayed_work(&vpu->watchdog_work)) {
		if (result == VB2_BUF_STATE_DONE && ctx->codec_ops->done)
			ctx->codec_ops->done(ctx);
		hantro_job_finish(vpu, ctx, result);
	}
}

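/*
 * Watchdog worker: the hardware did not report completion in time, so reset
 * the codec core (if the variant provides a reset hook) and fail the job.
 */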
void hantro_watchdog(struct work_struct *work)
{
	struct hantro_dev *vpu;
	struct hantro_ctx *ctx;

	vpu = container_of(to_delayed_work(work),
			   struct hantro_dev, watchdog_work);
	ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
	if (ctx) {
		vpu_err("frame processing timed out!\n");
		if (ctx->codec_ops->reset)
			ctx->codec_ops->reset(ctx);
		hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
	}
}

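/*
 * Apply the controls carried by the source buffer's request and, for
 * decoders on variants without late_postproc, set up the post-processor
 * before the codec registers are programmed.
 */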
void hantro_start_prepare_run(struct hantro_ctx *ctx)
{
	struct vb2_v4l2_buffer *src_buf;

	src_buf = hantro_get_src_buf(ctx);
	v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
				&ctx->ctrl_handler);

	if (!ctx->is_encoder && !ctx->dev->variant->late_postproc) {
		if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
			hantro_postproc_enable(ctx);
		else
			hantro_postproc_disable(ctx);
	}
}

void hantro_end_prepare_run(struct hantro_ctx *ctx)
{
	struct vb2_v4l2_buffer *src_buf;

	if (!ctx->is_encoder && ctx->dev->variant->late_postproc) {
		if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
			hantro_postproc_enable(ctx);
		else
			hantro_postproc_disable(ctx);
	}

	src_buf = hantro_get_src_buf(ctx);
	v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
				   &ctx->ctrl_handler);

	/* Kick the watchdog. */
	schedule_delayed_work(&ctx->dev->watchdog_work,
			      msecs_to_jiffies(2000));
}

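/*
 * Start the next scheduled job: resume the device, ungate the clocks, copy
 * buffer metadata and hand over to the per-codec run() hook. Any failure
 * completes the job with VB2_BUF_STATE_ERROR.
 */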
static void device_run(void *priv)
{
	struct hantro_ctx *ctx = priv;
	struct vb2_v4l2_buffer *src, *dst;
	int ret;

	src = hantro_get_src_buf(ctx);
	dst = hantro_get_dst_buf(ctx);

	ret = pm_runtime_resume_and_get(ctx->dev->dev);
	if (ret < 0)
		goto err_cancel_job;

	ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
	if (ret)
		goto err_cancel_job;

	v4l2_m2m_buf_copy_metadata(src, dst, true);

	if (ctx->codec_ops->run(ctx))
		goto err_cancel_job;

	return;

err_cancel_job:
	hantro_job_finish_no_pm(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
}

static const struct v4l2_m2m_ops vpu_m2m_ops = {
	.device_run = device_run,
};

static int
queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
{
	struct hantro_ctx *ctx = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->ops = &hantro_queue_ops;
	src_vq->mem_ops = &vb2_dma_contig_memops;

	/*
	 * Driver does mostly sequential access, so sacrifice TLB efficiency
	 * for faster allocation. Also, no CPU access on the source queue,
	 * so no kernel mapping needed.
	 */
	src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
			    DMA_ATTR_NO_KERNEL_MAPPING;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &ctx->dev->vpu_mutex;
	src_vq->dev = ctx->dev->v4l2_dev.dev;
	src_vq->supports_requests = true;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->bidirectional = true;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
	/*
	 * The kernel needs access to the JPEG destination buffer for the
	 * JPEG encoder to fill in the JPEG headers.
	 */
	if (!ctx->is_encoder)
		dst_vq->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->ops = &hantro_queue_ops;
	dst_vq->buf_struct_size = sizeof(struct hantro_decoded_buffer);
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &ctx->dev->vpu_mutex;
	dst_vq->dev = ctx->dev->v4l2_dev.dev;

	return vb2_queue_init(dst_vq);
}

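/*
 * Validate stateless codec headers against what the hardware supports
 * (chroma format, bit depth, VP9 profile); also records the HEVC bit depth.
 */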
static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
{
	struct hantro_ctx *ctx;

	ctx = container_of(ctrl->handler,
			   struct hantro_ctx, ctrl_handler);

	if (ctrl->id == V4L2_CID_STATELESS_H264_SPS) {
		const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps;

		if (sps->chroma_format_idc > 1)
			/* Only 4:0:0 and 4:2:0 are supported */
			return -EINVAL;
		if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
			/* Luma and chroma bit depth mismatch */
			return -EINVAL;
		if (sps->bit_depth_luma_minus8 != 0)
			/* Only 8-bit is supported */
			return -EINVAL;
	} else if (ctrl->id == V4L2_CID_STATELESS_HEVC_SPS) {
		const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;

		if (sps->bit_depth_luma_minus8 != 0 && sps->bit_depth_luma_minus8 != 2)
			/* Only 8-bit and 10-bit are supported */
			return -EINVAL;

		ctx->bit_depth = sps->bit_depth_luma_minus8 + 8;
	} else if (ctrl->id == V4L2_CID_STATELESS_VP9_FRAME) {
		const struct v4l2_ctrl_vp9_frame *dec_params = ctrl->p_new.p_vp9_frame;

		/* We only support profile 0 */
		if (dec_params->profile != 0)
			return -EINVAL;
	}
	return 0;
}

static int hantro_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct hantro_ctx *ctx;

	ctx = container_of(ctrl->handler,
			   struct hantro_ctx, ctrl_handler);

	vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);

	switch (ctrl->id) {
	case V4L2_CID_JPEG_COMPRESSION_QUALITY:
		ctx->jpeg_quality = ctrl->val;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hantro_vp9_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct hantro_ctx *ctx;

	ctx = container_of(ctrl->handler,
			   struct hantro_ctx, ctrl_handler);

	switch (ctrl->id) {
	case V4L2_CID_STATELESS_VP9_FRAME:
		ctx->bit_depth = ctrl->p_new.p_vp9_frame->bit_depth;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct v4l2_ctrl_ops hantro_ctrl_ops = {
	.try_ctrl = hantro_try_ctrl,
};

static const struct v4l2_ctrl_ops hantro_jpeg_ctrl_ops = {
	.s_ctrl = hantro_jpeg_s_ctrl,
};

static const struct v4l2_ctrl_ops hantro_vp9_ctrl_ops = {
	.s_ctrl = hantro_vp9_s_ctrl,
};

#define HANTRO_JPEG_ACTIVE_MARKERS	(V4L2_JPEG_ACTIVE_MARKER_APP0 | \
					 V4L2_JPEG_ACTIVE_MARKER_COM | \
					 V4L2_JPEG_ACTIVE_MARKER_DQT | \
					 V4L2_JPEG_ACTIVE_MARKER_DHT)

static const struct hantro_ctrl controls[] = {
	{
		.codec = HANTRO_JPEG_ENCODER,
		.cfg = {
			.id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
			.min = 5,
			.max = 100,
			.step = 1,
			.def = 50,
			.ops = &hantro_jpeg_ctrl_ops,
		},
	}, {
		.codec = HANTRO_JPEG_ENCODER,
		.cfg = {
			.id = V4L2_CID_JPEG_ACTIVE_MARKER,
			.max = HANTRO_JPEG_ACTIVE_MARKERS,
			.def = HANTRO_JPEG_ACTIVE_MARKERS,
			/*
			 * Changing the set of active markers/segments also
			 * messes up the alignment of the JPEG header, which
			 * is needed to allow the hardware to write directly
			 * to the output buffer. Implementing this introduces
			 * a lot of complexity for little gain, as the markers
			 * enabled is already the minimum required set.
			 */
			.flags = V4L2_CTRL_FLAG_READ_ONLY,
		},
	}, {
		.codec = HANTRO_MPEG2_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_MPEG2_SEQUENCE,
		},
	}, {
		.codec = HANTRO_MPEG2_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_MPEG2_PICTURE,
		},
	}, {
		.codec = HANTRO_MPEG2_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_MPEG2_QUANTISATION,
		},
	}, {
		.codec = HANTRO_VP8_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_VP8_FRAME,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_SPS,
			.ops = &hantro_ctrl_ops,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_PPS,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_DECODE_MODE,
			.min = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
			.def = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
			.max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_START_CODE,
			.min = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
			.def = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
			.max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
			.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
			.max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
			.menu_skip_mask =
			BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
			.def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
		}
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_DECODE_MODE,
			.min = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
			.max = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
			.def = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_START_CODE,
			.min = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
			.max = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
			.def = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
			.min = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
			.max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
			.def = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
			.min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
			.max = V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_SPS,
			.ops = &hantro_ctrl_ops,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_PPS,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_DECODE_PARAMS,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_SCALING_MATRIX,
		},
	}, {
		.codec = HANTRO_VP9_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_VP9_FRAME,
			.ops = &hantro_vp9_ctrl_ops,
		},
	}, {
		.codec = HANTRO_VP9_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_VP9_COMPRESSED_HDR,
		},
	},
};

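/*
 * Register the controls matching the codecs allowed on this function
 * (encoder or decoder) and apply their initial values.
 */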
static int hantro_ctrls_setup(struct hantro_dev *vpu,
			      struct hantro_ctx *ctx,
			      int allowed_codecs)
{
	int i, num_ctrls = ARRAY_SIZE(controls);

	v4l2_ctrl_handler_init(&ctx->ctrl_handler, num_ctrls);

	for (i = 0; i < num_ctrls; i++) {
		if (!(allowed_codecs & controls[i].codec))
			continue;

		v4l2_ctrl_new_custom(&ctx->ctrl_handler,
				     &controls[i].cfg, NULL);
		if (ctx->ctrl_handler.error) {
			vpu_err("Adding control (%d) failed %d\n",
				controls[i].cfg.id,
				ctx->ctrl_handler.error);
			v4l2_ctrl_handler_free(&ctx->ctrl_handler);
			return ctx->ctrl_handler.error;
		}
	}
	return v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
}

/*
 * V4L2 file operations.
 */

static int hantro_open(struct file *filp)
{
	struct hantro_dev *vpu = video_drvdata(filp);
	struct video_device *vdev = video_devdata(filp);
	struct hantro_func *func = hantro_vdev_to_func(vdev);
	struct hantro_ctx *ctx;
	int allowed_codecs, ret;

	/*
	 * We do not need any extra locking here, because we operate only
	 * on local data, except for reading a few fields from dev, which
	 * do not change throughout the device's lifetime (guaranteed by the
	 * module reference taken by open()) and V4L2 internal objects (such
	 * as vdev and ctx->fh), which have proper locking done in the
	 * respective helper functions used here.
	 */

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = vpu;
	if (func->id == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
		allowed_codecs = vpu->variant->codec & HANTRO_ENCODERS;
		ctx->is_encoder = true;
	} else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
		allowed_codecs = vpu->variant->codec & HANTRO_DECODERS;
		ctx->is_encoder = false;
	} else {
		ret = -ENODEV;
		goto err_ctx_free;
	}

	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx, queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		ret = PTR_ERR(ctx->fh.m2m_ctx);
		goto err_ctx_free;
	}

	v4l2_fh_init(&ctx->fh, vdev);
	filp->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	hantro_reset_fmts(ctx);

	ret = hantro_ctrls_setup(vpu, ctx, allowed_codecs);
	if (ret) {
		vpu_err("Failed to set up controls\n");
		goto err_fh_free;
	}
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;

	return 0;

err_fh_free:
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
err_ctx_free:
	kfree(ctx);
	return ret;
}

static int hantro_release(struct file *filp)
{
	struct hantro_ctx *ctx =
		container_of(filp->private_data, struct hantro_ctx, fh);

	/*
	 * No need for extra locking because this was the last reference
	 * to this file.
	 */
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
	kfree(ctx);

	return 0;
}

static const struct v4l2_file_operations hantro_fops = {
	.owner = THIS_MODULE,
	.open = hantro_open,
	.release = hantro_release,
	.poll = v4l2_m2m_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = v4l2_m2m_fop_mmap,
};

static const struct of_device_id of_hantro_match[] = {
#ifdef CONFIG_VIDEO_HANTRO_ROCKCHIP
	{ .compatible = "rockchip,px30-vpu", .data = &px30_vpu_variant, },
	{ .compatible = "rockchip,rk3036-vpu", .data = &rk3036_vpu_variant, },
	{ .compatible = "rockchip,rk3066-vpu", .data = &rk3066_vpu_variant, },
	{ .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
	{ .compatible = "rockchip,rk3328-vpu", .data = &rk3328_vpu_variant, },
	{ .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
	{ .compatible = "rockchip,rk3568-vepu", .data = &rk3568_vepu_variant, },
	{ .compatible = "rockchip,rk3568-vpu", .data = &rk3568_vpu_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_IMX8M
	{ .compatible = "nxp,imx8mm-vpu-g1", .data = &imx8mm_vpu_g1_variant, },
	{ .compatible = "nxp,imx8mq-vpu", .data = &imx8mq_vpu_variant, },
	{ .compatible = "nxp,imx8mq-vpu-g1", .data = &imx8mq_vpu_g1_variant },
	{ .compatible = "nxp,imx8mq-vpu-g2", .data = &imx8mq_vpu_g2_variant },
#endif
#ifdef CONFIG_VIDEO_HANTRO_SAMA5D4
	{ .compatible = "microchip,sama5d4-vdec", .data = &sama5d4_vdec_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_SUNXI
	{ .compatible = "allwinner,sun50i-h6-vpu-g2", .data = &sunxi_vpu_variant, },
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_hantro_match);

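/*
 * Initialize one media entity, name it after the video device and register
 * it with the media device.
 */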
static int hantro_register_entity(struct media_device *mdev,
				  struct media_entity *entity,
				  const char *entity_name,
				  struct media_pad *pads, int num_pads,
				  int function, struct video_device *vdev)
{
	char *name;
	int ret;

	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
	if (function == MEDIA_ENT_F_IO_V4L) {
		entity->info.dev.major = VIDEO_MAJOR;
		entity->info.dev.minor = vdev->minor;
	}

	name = devm_kasprintf(mdev->dev, GFP_KERNEL, "%s-%s", vdev->name,
			      entity_name);
	if (!name)
		return -ENOMEM;

	entity->name = name;
	entity->function = function;

	ret = media_entity_pads_init(entity, num_pads, pads);
	if (ret)
		return ret;

	ret = media_device_register_entity(mdev, entity);
	if (ret)
		return ret;

	return 0;
}

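/*
 * Build the media controller topology for one function: source and sink
 * I/O entities around the processing entity, plus the interface devnode
 * and the links between them.
 */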
static int hantro_attach_func(struct hantro_dev *vpu,
			      struct hantro_func *func)
{
	struct media_device *mdev = &vpu->mdev;
	struct media_link *link;
	int ret;

	/* Create the three encoder entities with their pads */
	func->source_pad.flags = MEDIA_PAD_FL_SOURCE;
	ret = hantro_register_entity(mdev, &func->vdev.entity, "source",
				     &func->source_pad, 1, MEDIA_ENT_F_IO_V4L,
				     &func->vdev);
	if (ret)
		return ret;

	func->proc_pads[0].flags = MEDIA_PAD_FL_SINK;
	func->proc_pads[1].flags = MEDIA_PAD_FL_SOURCE;
	ret = hantro_register_entity(mdev, &func->proc, "proc",
				     func->proc_pads, 2, func->id,
				     &func->vdev);
	if (ret)
		goto err_rel_entity0;

	func->sink_pad.flags = MEDIA_PAD_FL_SINK;
	ret = hantro_register_entity(mdev, &func->sink, "sink",
				     &func->sink_pad, 1, MEDIA_ENT_F_IO_V4L,
				     &func->vdev);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0,
				    MEDIA_LNK_FL_IMMUTABLE |
				    MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&func->proc, 1, &func->sink, 0,
				    MEDIA_LNK_FL_IMMUTABLE |
				    MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	func->intf_devnode = media_devnode_create(mdev, MEDIA_INTF_T_V4L_VIDEO,
						  0, VIDEO_MAJOR,
						  func->vdev.minor);
	if (!func->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(&func->vdev.entity,
				      &func->intf_devnode->intf,
				      MEDIA_LNK_FL_IMMUTABLE |
				      MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&func->sink, &func->intf_devnode->intf,
				      MEDIA_LNK_FL_IMMUTABLE |
				      MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}
	return 0;

err_rm_devnode:
	media_devnode_remove(func->intf_devnode);

err_rm_links1:
	media_entity_remove_links(&func->sink);

err_rm_links0:
	media_entity_remove_links(&func->proc);
	media_entity_remove_links(&func->vdev.entity);

err_rel_entity2:
	media_device_unregister_entity(&func->sink);

err_rel_entity1:
	media_device_unregister_entity(&func->proc);

err_rel_entity0:
	media_device_unregister_entity(&func->vdev.entity);
	return ret;
}

static void hantro_detach_func(struct hantro_func *func)
{
	media_devnode_remove(func->intf_devnode);
	media_entity_remove_links(&func->sink);
	media_entity_remove_links(&func->proc);
	media_entity_remove_links(&func->vdev.entity);
	media_device_unregister_entity(&func->sink);
	media_device_unregister_entity(&func->proc);
	media_device_unregister_entity(&func->vdev.entity);
}

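/*
 * Allocate and register the video device for one function (encoder or
 * decoder) and attach it to the media device.
 */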
static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
{
	const struct of_device_id *match;
	struct hantro_func *func;
	struct video_device *vfd;
	int ret;

	match = of_match_node(of_hantro_match, vpu->dev->of_node);
	func = devm_kzalloc(vpu->dev, sizeof(*func), GFP_KERNEL);
	if (!func) {
		v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
		return -ENOMEM;
	}

	func->id = funcid;

	vfd = &func->vdev;
	vfd->fops = &hantro_fops;
	vfd->release = video_device_release_empty;
	vfd->lock = &vpu->vpu_mutex;
	vfd->v4l2_dev = &vpu->v4l2_dev;
	vfd->vfl_dir = VFL_DIR_M2M;
	vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
	vfd->ioctl_ops = &hantro_ioctl_ops;
	snprintf(vfd->name, sizeof(vfd->name), "%s-%s", match->compatible,
		 funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER ? "enc" : "dec");

	if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
		vpu->encoder = func;
		v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD);
		v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD);
	} else {
		vpu->decoder = func;
		v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
		v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
	}

	video_set_drvdata(vfd, vpu);

	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
	if (ret) {
		v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
		return ret;
	}

	ret = hantro_attach_func(vpu, func);
	if (ret) {
		v4l2_err(&vpu->v4l2_dev,
			 "Failed to attach functionality to the media device\n");
		goto err_unreg_dev;
	}

	v4l2_info(&vpu->v4l2_dev, "registered %s as /dev/video%d\n", vfd->name,
		  vfd->num);

	return 0;

err_unreg_dev:
	video_unregister_device(vfd);
	return ret;
}

static int hantro_add_enc_func(struct hantro_dev *vpu)
{
	if (!vpu->variant->enc_fmts)
		return 0;

	return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
}

static int hantro_add_dec_func(struct hantro_dev *vpu)
{
	if (!vpu->variant->dec_fmts)
		return 0;

	return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
}

static void hantro_remove_func(struct hantro_dev *vpu,
			       unsigned int funcid)
{
	struct hantro_func *func;

	if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER)
		func = vpu->encoder;
	else
		func = vpu->decoder;

	if (!func)
		return;

	hantro_detach_func(func);
	video_unregister_device(&func->vdev);
}

static void hantro_remove_enc_func(struct hantro_dev *vpu)
{
	hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
}

static void hantro_remove_dec_func(struct hantro_dev *vpu)
{
	hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
}

static const struct media_device_ops hantro_m2m_media_ops = {
	.req_validate = vb2_request_validate,
	.req_queue = v4l2_m2m_request_queue,
};

static int hantro_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct hantro_dev *vpu;
	struct resource *res;
	int num_bases;
	int i, ret;

	vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL);
	if (!vpu)
		return -ENOMEM;

	vpu->dev = &pdev->dev;
	vpu->pdev = pdev;
	mutex_init(&vpu->vpu_mutex);
	spin_lock_init(&vpu->irqlock);

	match = of_match_node(of_hantro_match, pdev->dev.of_node);
	vpu->variant = match->data;

	/*
	 * Support for nxp,imx8mq-vpu is kept for backwards compatibility
	 * but it's deprecated. Please update your DTS file to use
	 * nxp,imx8mq-vpu-g1 or nxp,imx8mq-vpu-g2 instead.
	 */
	if (of_device_is_compatible(pdev->dev.of_node, "nxp,imx8mq-vpu"))
		dev_warn(&pdev->dev, "%s compatible is deprecated\n",
			 match->compatible);

	INIT_DELAYED_WORK(&vpu->watchdog_work, hantro_watchdog);

	vpu->clocks = devm_kcalloc(&pdev->dev, vpu->variant->num_clocks,
				   sizeof(*vpu->clocks), GFP_KERNEL);
	if (!vpu->clocks)
		return -ENOMEM;

	if (vpu->variant->num_clocks > 1) {
		for (i = 0; i < vpu->variant->num_clocks; i++)
			vpu->clocks[i].id = vpu->variant->clk_names[i];

		ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks,
					vpu->clocks);
		if (ret)
			return ret;
	} else {
		/*
		 * If the driver has a single clk, chances are there will be no
		 * actual name in the DT bindings.
		 */
		vpu->clocks[0].clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(vpu->clocks[0].clk))
			return PTR_ERR(vpu->clocks[0].clk);
	}

	vpu->resets = devm_reset_control_array_get(&pdev->dev, false, true);
	if (IS_ERR(vpu->resets))
		return PTR_ERR(vpu->resets);

	num_bases = vpu->variant->num_regs ?: 1;
	vpu->reg_bases = devm_kcalloc(&pdev->dev, num_bases,
				      sizeof(*vpu->reg_bases), GFP_KERNEL);
	if (!vpu->reg_bases)
		return -ENOMEM;

	for (i = 0; i < num_bases; i++) {
		res = vpu->variant->reg_names ?
		      platform_get_resource_byname(vpu->pdev, IORESOURCE_MEM,
						   vpu->variant->reg_names[i]) :
		      platform_get_resource(vpu->pdev, IORESOURCE_MEM, 0);
		vpu->reg_bases[i] = devm_ioremap_resource(vpu->dev, res);
		if (IS_ERR(vpu->reg_bases[i]))
			return PTR_ERR(vpu->reg_bases[i]);
	}
	vpu->enc_base = vpu->reg_bases[0] + vpu->variant->enc_offset;
	vpu->dec_base = vpu->reg_bases[0] + vpu->variant->dec_offset;

	/*
	 * TODO: Eventually allow taking advantage of full 64-bit address space.
	 * Until then we assume the MSB portion of buffers' base addresses is
	 * always 0 due to this masking operation.
	 */
	ret = dma_set_coherent_mask(vpu->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
		return ret;
	}
	vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));

	for (i = 0; i < vpu->variant->num_irqs; i++) {
		const char *irq_name;
		int irq;

		if (!vpu->variant->irqs[i].handler)
			continue;

		if (vpu->variant->num_irqs > 1) {
			irq_name = vpu->variant->irqs[i].name;
			irq = platform_get_irq_byname(vpu->pdev, irq_name);
		} else {
			/*
			 * If the driver has a single IRQ, chances are there
			 * will be no actual name in the DT bindings.
			 */
			irq_name = "default";
			irq = platform_get_irq(vpu->pdev, 0);
		}
		if (irq <= 0)
			return -ENXIO;

		ret = devm_request_irq(vpu->dev, irq,
				       vpu->variant->irqs[i].handler, 0,
				       dev_name(vpu->dev), vpu);
		if (ret) {
			dev_err(vpu->dev, "Could not request %s IRQ.\n",
				irq_name);
			return ret;
		}
	}

	if (vpu->variant->init) {
		ret = vpu->variant->init(vpu);
		if (ret) {
			dev_err(&pdev->dev, "Failed to init VPU hardware\n");
			return ret;
		}
	}

	pm_runtime_set_autosuspend_delay(vpu->dev, 100);
	pm_runtime_use_autosuspend(vpu->dev);
	pm_runtime_enable(vpu->dev);

	ret = reset_control_deassert(vpu->resets);
	if (ret) {
		dev_err(&pdev->dev, "Failed to deassert resets\n");
		goto err_pm_disable;
	}

	ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks);
	if (ret) {
		dev_err(&pdev->dev, "Failed to prepare clocks\n");
		goto err_rst_assert;
	}

	ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register v4l2 device\n");
		goto err_clk_unprepare;
	}
	platform_set_drvdata(pdev, vpu);

	vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
	if (IS_ERR(vpu->m2m_dev)) {
		v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
		ret = PTR_ERR(vpu->m2m_dev);
		goto err_v4l2_unreg;
	}

	vpu->mdev.dev = vpu->dev;
	strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
	strscpy(vpu->mdev.bus_info, "platform: " DRIVER_NAME,
		sizeof(vpu->mdev.bus_info));
	media_device_init(&vpu->mdev);
	vpu->mdev.ops = &hantro_m2m_media_ops;
	vpu->v4l2_dev.mdev = &vpu->mdev;

	ret = hantro_add_enc_func(vpu);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register encoder\n");
		goto err_m2m_rel;
	}

	ret = hantro_add_dec_func(vpu);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register decoder\n");
		goto err_rm_enc_func;
	}

	ret = media_device_register(&vpu->mdev);
	if (ret) {
		v4l2_err(&vpu->v4l2_dev, "Failed to register mem2mem media device\n");
		goto err_rm_dec_func;
	}

	return 0;

err_rm_dec_func:
	hantro_remove_dec_func(vpu);
err_rm_enc_func:
	hantro_remove_enc_func(vpu);
err_m2m_rel:
	media_device_cleanup(&vpu->mdev);
	v4l2_m2m_release(vpu->m2m_dev);
err_v4l2_unreg:
	v4l2_device_unregister(&vpu->v4l2_dev);
err_clk_unprepare:
	clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
err_rst_assert:
	reset_control_assert(vpu->resets);
err_pm_disable:
	pm_runtime_dont_use_autosuspend(vpu->dev);
	pm_runtime_disable(vpu->dev);
	return ret;
}

static int hantro_remove(struct platform_device *pdev)
{
	struct hantro_dev *vpu = platform_get_drvdata(pdev);

	v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);

	media_device_unregister(&vpu->mdev);
	hantro_remove_dec_func(vpu);
	hantro_remove_enc_func(vpu);
	media_device_cleanup(&vpu->mdev);
	v4l2_m2m_release(vpu->m2m_dev);
	v4l2_device_unregister(&vpu->v4l2_dev);
	clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
	reset_control_assert(vpu->resets);
	pm_runtime_dont_use_autosuspend(vpu->dev);
	pm_runtime_disable(vpu->dev);
	return 0;
}

#ifdef CONFIG_PM
static int hantro_runtime_resume(struct device *dev)
{
	struct hantro_dev *vpu = dev_get_drvdata(dev);

	if (vpu->variant->runtime_resume)
		return vpu->variant->runtime_resume(vpu);

	return 0;
}
#endif

static const struct dev_pm_ops hantro_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(NULL, hantro_runtime_resume, NULL)
};

static struct platform_driver hantro_driver = {
	.probe = hantro_probe,
	.remove = hantro_remove,
	.driver = {
		   .name = DRIVER_NAME,
		   .of_match_table = of_match_ptr(of_hantro_match),
		   .pm = &hantro_pm_ops,
	},
};
module_platform_driver(hantro_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alpha Lin <Alpha.Lin@Rock-Chips.com>");
MODULE_AUTHOR("Tomasz Figa <tfiga@chromium.org>");
MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
MODULE_DESCRIPTION("Hantro VPU codec driver");