// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd. */

#include <linux/kfifo.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>	/* for ISP statistics */
#include "dev.h"
#include "regs.h"
#include "isp_mipi_luma.h"

#define RKISP_ISP_LUMA_REQ_BUFS_MIN 2
#define RKISP_ISP_LUMA_REQ_BUFS_MAX 8

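/*
 * Format ioctls for the luma metadata node: only one fixed meta format is
 * exposed, so enum_fmt reports a single entry and g/s/try_fmt all return the
 * dataformat and buffersize stored in luma_vdev->vdev_fmt.
 */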
static int rkisp_luma_enum_fmt_meta_cap(struct file *file, void *priv,
					struct v4l2_fmtdesc *f)
{
	struct video_device *video = video_devdata(file);
	struct rkisp_luma_vdev *luma_vdev = video_get_drvdata(video);

	if (f->index > 0 || f->type != video->queue->type)
		return -EINVAL;

	f->pixelformat = luma_vdev->vdev_fmt.fmt.meta.dataformat;
	return 0;
}

static int rkisp_luma_g_fmt_meta_cap(struct file *file, void *priv,
				     struct v4l2_format *f)
{
	struct video_device *video = video_devdata(file);
	struct rkisp_luma_vdev *luma_vdev = video_get_drvdata(video);
	struct v4l2_meta_format *meta = &f->fmt.meta;

	if (f->type != video->queue->type)
		return -EINVAL;

	memset(meta, 0, sizeof(*meta));
	meta->dataformat = luma_vdev->vdev_fmt.fmt.meta.dataformat;
	meta->buffersize = luma_vdev->vdev_fmt.fmt.meta.buffersize;

	return 0;
}

static int rkisp_luma_querycap(struct file *file,
			       void *priv, struct v4l2_capability *cap)
{
	struct video_device *vdev = video_devdata(file);
	struct rkisp_luma_vdev *luma_vdev = video_get_drvdata(vdev);

	snprintf(cap->driver, sizeof(cap->driver),
		 "%s_v%d", DRIVER_NAME,
		 luma_vdev->dev->isp_ver >> 4);
	strlcpy(cap->card, vdev->name, sizeof(cap->card));
	strlcpy(cap->bus_info, "platform: " DRIVER_NAME, sizeof(cap->bus_info));

	return 0;
}

/* ISP video device IOCTLs */
static const struct v4l2_ioctl_ops rkisp_luma_ioctl = {
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_enum_fmt_meta_cap = rkisp_luma_enum_fmt_meta_cap,
	.vidioc_g_fmt_meta_cap = rkisp_luma_g_fmt_meta_cap,
	.vidioc_s_fmt_meta_cap = rkisp_luma_g_fmt_meta_cap,
	.vidioc_try_fmt_meta_cap = rkisp_luma_g_fmt_meta_cap,
	.vidioc_querycap = rkisp_luma_querycap
};

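/*
 * File open/release wrappers: besides the standard v4l2/vb2 file handling,
 * they take and drop a media pipeline power-management reference so the ISP
 * pipeline stays powered while the luma node is open.
 */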
static int rkisp_luma_fh_open(struct file *filp)
{
	struct rkisp_luma_vdev *params = video_drvdata(filp);
	int ret;

	ret = v4l2_fh_open(filp);
	if (!ret) {
		ret = v4l2_pipeline_pm_get(&params->vnode.vdev.entity);
		if (ret < 0)
			vb2_fop_release(filp);
	}

	return ret;
}

static int rkisp_luma_fop_release(struct file *file)
{
	struct rkisp_luma_vdev *luma = video_drvdata(file);
	int ret;

	ret = vb2_fop_release(file);
	if (!ret)
		v4l2_pipeline_pm_put(&luma->vnode.vdev.entity);
	return ret;
}

struct v4l2_file_operations rkisp_luma_fops = {
	.mmap = vb2_fop_mmap,
	.unlocked_ioctl = video_ioctl2,
	.poll = vb2_fop_poll,
	.open = rkisp_luma_fh_open,
	.release = rkisp_luma_fop_release
};

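/*
 * vb2 queue setup: each buffer has a single plane sized for one full
 * struct rkisp_isp2x_luma_buffer, and the buffer count is clamped to
 * [RKISP_ISP_LUMA_REQ_BUFS_MIN, RKISP_ISP_LUMA_REQ_BUFS_MAX].
 */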
static int rkisp_luma_vb2_queue_setup(struct vb2_queue *vq,
				      unsigned int *num_buffers,
				      unsigned int *num_planes,
				      unsigned int sizes[],
				      struct device *alloc_ctxs[])
{
	struct rkisp_luma_vdev *luma_vdev = vq->drv_priv;

	*num_planes = 1;

	*num_buffers = clamp_t(u32, *num_buffers, RKISP_ISP_LUMA_REQ_BUFS_MIN,
			       RKISP_ISP_LUMA_REQ_BUFS_MAX);

	sizes[0] = sizeof(struct rkisp_isp2x_luma_buffer);

	INIT_LIST_HEAD(&luma_vdev->stat);

	return 0;
}

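/*
 * Queued buffers are cached with their vmalloc mapping and appended to the
 * luma_vdev->stat list under rd_lock; the readout path later pops them from
 * this list to fill in measurement data.
 */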
static void rkisp_luma_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rkisp_buffer *luma_buf = to_rkisp_buffer(vbuf);
	struct vb2_queue *vq = vb->vb2_queue;
	struct rkisp_luma_vdev *luma_dev = vq->drv_priv;

	luma_buf->vaddr[0] = vb2_plane_vaddr(vb, 0);

	spin_lock_bh(&luma_dev->rd_lock);
	list_add_tail(&luma_buf->queue, &luma_dev->stat);
	spin_unlock_bh(&luma_dev->rd_lock);
}

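/*
 * Stop streaming: mark the device stopped under irq_lock so the ISR no
 * longer queues readout work, disable the readout tasklet, then return all
 * buffers still on the stat list with VB2_BUF_STATE_ERROR.
 */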
static void rkisp_luma_vb2_stop_streaming(struct vb2_queue *vq)
{
	struct rkisp_luma_vdev *luma_vdev = vq->drv_priv;
	struct rkisp_buffer *buf;
	unsigned long flags;
	int i;

	/* Make sure no new work queued in isr before draining wq */
	spin_lock_irqsave(&luma_vdev->irq_lock, flags);
	luma_vdev->streamon = false;
	spin_unlock_irqrestore(&luma_vdev->irq_lock, flags);

	tasklet_disable(&luma_vdev->rd_tasklet);

	spin_lock_bh(&luma_vdev->rd_lock);
	for (i = 0; i < RKISP_ISP_LUMA_REQ_BUFS_MAX; i++) {
		if (list_empty(&luma_vdev->stat))
			break;
		buf = list_first_entry(&luma_vdev->stat,
				       struct rkisp_buffer, queue);
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_bh(&luma_vdev->rd_lock);
}

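/*
 * Start streaming: clear the per-raw-path ISR counters and read flags,
 * flush any stale work from the readout kfifo and re-enable the tasklet
 * that was left disabled at registration/stop time.
 */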
static int
rkisp_luma_vb2_start_streaming(struct vb2_queue *queue,
			       unsigned int count)
{
	struct rkisp_luma_vdev *luma_vdev = queue->drv_priv;
	u32 i;

	for (i = 0; i < ISP2X_MIPI_RAW_MAX; i++) {
		luma_vdev->ystat_isrcnt[i] = 0;
		luma_vdev->ystat_rdflg[i] = false;
	}

	luma_vdev->streamon = true;
	kfifo_reset(&luma_vdev->rd_kfifo);
	tasklet_enable(&luma_vdev->rd_tasklet);

	return 0;
}

static struct vb2_ops rkisp_luma_vb2_ops = {
	.queue_setup = rkisp_luma_vb2_queue_setup,
	.buf_queue = rkisp_luma_vb2_buf_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.stop_streaming = rkisp_luma_vb2_stop_streaming,
	.start_streaming = rkisp_luma_vb2_start_streaming,
};

static int rkisp_luma_init_vb2_queue(struct vb2_queue *q,
				     struct rkisp_luma_vdev *luma_vdev)
{
	q->type = V4L2_BUF_TYPE_META_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR;
	q->drv_priv = luma_vdev;
	q->ops = &rkisp_luma_vb2_ops;
	q->mem_ops = &vb2_vmalloc_memops;
	q->buf_struct_size = sizeof(struct rkisp_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &luma_vdev->dev->iqlock;
	q->dev = luma_vdev->dev->dev;

	return vb2_queue_init(q);
}

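/*
 * Copy one luma readout work item into the first available capture buffer:
 * frame id, measurement type and the per-raw-path exposure mean arrays are
 * written into the mapped rkisp_isp2x_luma_buffer, and the buffer is
 * returned to userspace as DONE with the sequence and timestamp taken from
 * the work item.
 */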
static void
rkisp_stats_send_luma(struct rkisp_luma_vdev *vdev,
		      struct rkisp_luma_readout_work *work)
{
	unsigned int cur_frame_id;
	struct rkisp_isp2x_luma_buffer *cur_stat_buf;
	struct rkisp_buffer *cur_buf = NULL;
	u32 i, j;

	spin_lock(&vdev->rd_lock);
	/* get one empty buffer */
	if (!list_empty(&vdev->stat)) {
		cur_buf = list_first_entry(&vdev->stat,
					   struct rkisp_buffer, queue);
		list_del(&cur_buf->queue);
	}
	spin_unlock(&vdev->rd_lock);

	if (!cur_buf) {
		v4l2_warn(vdev->vnode.vdev.v4l2_dev,
			  "no luma buffer available\n");
		return;
	}

	cur_stat_buf =
		(struct rkisp_isp2x_luma_buffer *)(cur_buf->vaddr[0]);
	if (!cur_stat_buf) {
		v4l2_err(vdev->vnode.vdev.v4l2_dev,
			 "cur_stat_buf is NULL\n");
		return;
	}

	cur_stat_buf->frame_id = work->frame_id;
	cur_stat_buf->meas_type = work->meas_type;
	for (i = 0; i < ISP2X_MIPI_RAW_MAX; i++) {
		for (j = 0; j < ISP2X_MIPI_LUMA_MEAN_MAX; j++) {
			cur_stat_buf->luma[i].exp_mean[j] =
				work->luma[i].exp_mean[j];
		}
	}

	cur_frame_id = cur_stat_buf->frame_id;
	vb2_set_plane_payload(&cur_buf->vb.vb2_buf, 0,
			      sizeof(struct rkisp_isp2x_luma_buffer));
	cur_buf->vb.sequence = cur_frame_id;
	cur_buf->vb.vb2_buf.timestamp = work->timestamp;
	vb2_buffer_done(&cur_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}

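/*
 * Bottom half of the readout path: the tasklet drains the rd_kfifo that the
 * ISR filled and forwards each RKISP_ISP_READOUT_LUMA work item to
 * rkisp_stats_send_luma() outside of hard-irq context.
 */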
static void rkisp_luma_readout_task(unsigned long data)
{
	unsigned int out = 0;
	struct rkisp_luma_readout_work work;
	struct rkisp_luma_vdev *vdev =
		(struct rkisp_luma_vdev *)data;

	while (!kfifo_is_empty(&vdev->rd_kfifo)) {
		out = kfifo_out(&vdev->rd_kfifo,
				&work, sizeof(work));
		if (!out)
			break;

		if (work.readout == RKISP_ISP_READOUT_LUMA)
			rkisp_stats_send_luma(vdev, &work);
	}
}

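/*
 * Luma interrupt handler, called from the ISP ISR with the MIPI interrupt
 * status. For each raw path (raw0/raw1/raw2, selected by the HDR readback
 * mode) it counts RAWx_Y_STATE interrupts and, once RKISP_LUMA_YSTAT_ISR_NUM
 * of them have arrived, reads that path's exposure means from
 * CSI2RX_Y_STAT_RO. When every path required by the current frame mode has
 * been read, the collected work item is pushed into rd_kfifo, the readout
 * tasklet is scheduled, and the counters and flags are reset for the next
 * frame.
 */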
void rkisp_luma_isr(struct rkisp_luma_vdev *luma_vdev, u32 isp_stat)
{
	void __iomem *base = luma_vdev->dev->base_addr;
	u8 op_mode = luma_vdev->dev->hdr.op_mode;
	unsigned int cur_frame_id =
		atomic_read(&luma_vdev->dev->isp_sdev.frm_sync_seq) - 1;
	enum rkisp_luma_frm_mode frm_mode;
	bool send_task;
	u32 i, value;

	spin_lock(&luma_vdev->irq_lock);
	if (!luma_vdev->streamon)
		goto unlock;

	switch (op_mode) {
	case HDR_RDBK_FRAME2:
		frm_mode = RKISP_LUMA_TWOFRM;
		break;
	case HDR_RDBK_FRAME3:
		frm_mode = RKISP_LUMA_THREEFRM;
		break;
	case HDR_RDBK_FRAME1:
		frm_mode = RKISP_LUMA_ONEFRM;
		break;
	default:
		goto unlock;
	}

	if (isp_stat & RAW0_Y_STATE)
		luma_vdev->ystat_isrcnt[0]++;

	if (isp_stat & RAW1_Y_STATE)
		luma_vdev->ystat_isrcnt[1]++;

	if (isp_stat & RAW2_Y_STATE)
		luma_vdev->ystat_isrcnt[2]++;

	if (isp_stat & RAW0_WR_FRAME) {
		if (luma_vdev->ystat_isrcnt[0] != RKISP_LUMA_YSTAT_ISR_NUM) {
			v4l2_dbg(1, rkisp_debug, luma_vdev->vnode.vdev.v4l2_dev,
				 "missing raw0 y state isr, %d\n",
				 luma_vdev->ystat_isrcnt[0]);
			luma_vdev->ystat_isrcnt[0] = RKISP_LUMA_YSTAT_ISR_NUM;
		}
	}
	if (isp_stat & RAW1_WR_FRAME) {
		if (luma_vdev->ystat_isrcnt[1] != RKISP_LUMA_YSTAT_ISR_NUM) {
			v4l2_dbg(1, rkisp_debug, luma_vdev->vnode.vdev.v4l2_dev,
				 "missing raw1 y state isr, %d\n",
				 luma_vdev->ystat_isrcnt[1]);
			luma_vdev->ystat_isrcnt[1] = RKISP_LUMA_YSTAT_ISR_NUM;
		}
	}
	if (isp_stat & RAW2_WR_FRAME) {
		if (luma_vdev->ystat_isrcnt[2] != RKISP_LUMA_YSTAT_ISR_NUM) {
			v4l2_dbg(1, rkisp_debug, luma_vdev->vnode.vdev.v4l2_dev,
				 "missing raw2 y state isr, %d\n",
				 luma_vdev->ystat_isrcnt[2]);
			luma_vdev->ystat_isrcnt[2] = RKISP_LUMA_YSTAT_ISR_NUM;
		}
	}

	if (luma_vdev->ystat_isrcnt[0] == RKISP_LUMA_YSTAT_ISR_NUM &&
	    !luma_vdev->ystat_rdflg[0]) {
		value = readl(base + CSI2RX_Y_STAT_CTRL);
		value &= ~(SW_Y_STAT_RD_FRM_ID(0x3));
		value |= (SW_Y_STAT_RD_FRM_ID(0x0) | SW_Y_STAT_RD_EN);
		writel(value, base + CSI2RX_Y_STAT_CTRL);
		for (i = 0; i < ISP2X_MIPI_LUMA_MEAN_MAX; i++)
			luma_vdev->work.luma[0].exp_mean[i] =
				readl(base + CSI2RX_Y_STAT_RO);

		luma_vdev->ystat_rdflg[0] = true;
	}
	if (luma_vdev->ystat_isrcnt[1] == RKISP_LUMA_YSTAT_ISR_NUM &&
	    !luma_vdev->ystat_rdflg[1]) {
		value = readl(base + CSI2RX_Y_STAT_CTRL);
		value &= ~(SW_Y_STAT_RD_FRM_ID(0x3));
		value |= (SW_Y_STAT_RD_FRM_ID(0x1) | SW_Y_STAT_RD_EN);
		writel(value, base + CSI2RX_Y_STAT_CTRL);
		for (i = 0; i < ISP2X_MIPI_LUMA_MEAN_MAX; i++)
			luma_vdev->work.luma[1].exp_mean[i] =
				readl(base + CSI2RX_Y_STAT_RO);

		luma_vdev->ystat_rdflg[1] = true;
	}
	if (luma_vdev->ystat_isrcnt[2] == RKISP_LUMA_YSTAT_ISR_NUM &&
	    !luma_vdev->ystat_rdflg[2]) {
		value = readl(base + CSI2RX_Y_STAT_CTRL);
		value &= ~(SW_Y_STAT_RD_FRM_ID(0x3));
		value |= (SW_Y_STAT_RD_FRM_ID(0x2) | SW_Y_STAT_RD_EN);
		writel(value, base + CSI2RX_Y_STAT_CTRL);
		for (i = 0; i < ISP2X_MIPI_LUMA_MEAN_MAX; i++)
			luma_vdev->work.luma[2].exp_mean[i] =
				readl(base + CSI2RX_Y_STAT_RO);

		luma_vdev->ystat_rdflg[2] = true;
	}

	send_task = false;
	if (frm_mode == RKISP_LUMA_THREEFRM) {
		if (luma_vdev->ystat_rdflg[0] && luma_vdev->ystat_rdflg[1] &&
		    luma_vdev->ystat_rdflg[2])
			send_task = true;
	} else if (frm_mode == RKISP_LUMA_TWOFRM) {
		if (luma_vdev->ystat_rdflg[0] && luma_vdev->ystat_rdflg[2])
			send_task = true;
	} else {
		if (luma_vdev->ystat_rdflg[2])
			send_task = true;
	}

	if (send_task) {
		luma_vdev->work.readout = RKISP_ISP_READOUT_LUMA;
		luma_vdev->work.timestamp = ktime_get_ns();
		luma_vdev->work.frame_id = cur_frame_id;

		if (frm_mode == RKISP_LUMA_THREEFRM)
			luma_vdev->work.meas_type = ISP2X_RAW0_Y_STATE |
						    ISP2X_RAW1_Y_STATE |
						    ISP2X_RAW2_Y_STATE;
		else if (frm_mode == RKISP_LUMA_TWOFRM)
			luma_vdev->work.meas_type = ISP2X_RAW0_Y_STATE |
						    ISP2X_RAW2_Y_STATE;
		else
			luma_vdev->work.meas_type = ISP2X_RAW2_Y_STATE;

		if (!kfifo_is_full(&luma_vdev->rd_kfifo))
			kfifo_in(&luma_vdev->rd_kfifo,
				 &luma_vdev->work, sizeof(luma_vdev->work));
		else
			v4l2_err(luma_vdev->vnode.vdev.v4l2_dev,
				 "stats kfifo is full\n");

		tasklet_schedule(&luma_vdev->rd_tasklet);

		for (i = 0; i < ISP2X_MIPI_RAW_MAX; i++) {
			luma_vdev->ystat_isrcnt[i] = 0;
			luma_vdev->ystat_rdflg[i] = false;
		}

		memset(&luma_vdev->work, 0, sizeof(luma_vdev->work));
	}

unlock:
	spin_unlock(&luma_vdev->irq_lock);
}

static void rkisp_init_luma_vdev(struct rkisp_luma_vdev *luma_vdev)
{
	luma_vdev->vdev_fmt.fmt.meta.dataformat =
		V4L2_META_FMT_RK_ISP1_STAT_LUMA;
	luma_vdev->vdev_fmt.fmt.meta.buffersize =
		sizeof(struct rkisp_isp2x_luma_buffer);
}

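/*
 * Register the luma metadata video node (ISP v2.0 only): set up the vb2
 * queue and the default meta format, create the media pad and the link from
 * the ISP stats source pad, and allocate the kfifo/tasklet used to hand luma
 * readouts from the ISR to the readout tasklet. The tasklet starts disabled
 * and is only enabled in start_streaming.
 */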
int rkisp_register_luma_vdev(struct rkisp_luma_vdev *luma_vdev,
			     struct v4l2_device *v4l2_dev,
			     struct rkisp_device *dev)
{
	int ret;
	struct rkisp_vdev_node *node = &luma_vdev->vnode;
	struct video_device *vdev = &node->vdev;
	struct media_entity *source, *sink;

	luma_vdev->dev = dev;
	if (dev->isp_ver != ISP_V20)
		return 0;

	INIT_LIST_HEAD(&luma_vdev->stat);
	spin_lock_init(&luma_vdev->irq_lock);
	spin_lock_init(&luma_vdev->rd_lock);

	strlcpy(vdev->name, "rkisp-mipi-luma", sizeof(vdev->name));

	vdev->ioctl_ops = &rkisp_luma_ioctl;
	vdev->fops = &rkisp_luma_fops;
	vdev->release = video_device_release_empty;
	vdev->lock = &dev->iqlock;
	vdev->v4l2_dev = v4l2_dev;
	vdev->queue = &node->buf_queue;
	vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
	vdev->vfl_dir = VFL_DIR_RX;
	rkisp_luma_init_vb2_queue(vdev->queue, luma_vdev);
	rkisp_init_luma_vdev(luma_vdev);
	video_set_drvdata(vdev, luma_vdev);

	node->pad.flags = MEDIA_PAD_FL_SINK;
	ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
	if (ret < 0)
		goto err_release_queue;

	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret < 0) {
		dev_err(&vdev->dev,
			"could not register Video for Linux device\n");
		goto err_cleanup_media_entity;
	}

	source = &dev->isp_sdev.sd.entity;
	sink = &luma_vdev->vnode.vdev.entity;
	ret = media_create_pad_link(source, RKISP_ISP_PAD_SOURCE_STATS,
				    sink, 0, MEDIA_LNK_FL_ENABLED);
	if (ret < 0)
		goto err_unregister_video;

	ret = kfifo_alloc(&luma_vdev->rd_kfifo,
			  RKISP_LUMA_READOUT_WORK_SIZE,
			  GFP_KERNEL);
	if (ret) {
		dev_err(&vdev->dev,
			"kfifo_alloc failed with error %d\n",
			ret);
		goto err_unregister_video;
	}

	tasklet_init(&luma_vdev->rd_tasklet,
		     rkisp_luma_readout_task,
		     (unsigned long)luma_vdev);
	tasklet_disable(&luma_vdev->rd_tasklet);

	return 0;

err_unregister_video:
	video_unregister_device(vdev);
err_cleanup_media_entity:
	media_entity_cleanup(&vdev->entity);
err_release_queue:
	vb2_queue_release(vdev->queue);
	return ret;
}

void rkisp_unregister_luma_vdev(struct rkisp_luma_vdev *luma_vdev)
{
	struct rkisp_vdev_node *node = &luma_vdev->vnode;
	struct video_device *vdev = &node->vdev;

	if (luma_vdev->dev->isp_ver != ISP_V20)
		return;
	kfifo_free(&luma_vdev->rd_kfifo);
	tasklet_kill(&luma_vdev->rd_tasklet);
	video_unregister_device(vdev);
	media_entity_cleanup(&vdev->entity);
	vb2_queue_release(vdev->queue);
}