// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Rockchip Electronics Co., Ltd. */

#include <linux/kfifo.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>
#include "dev.h"
#include "regs.h"
#include "cif-luma.h"
#include "mipi-csi2.h"

#define RKCIF_LUMA_REQ_BUFS_MIN		2
#define RKCIF_LUMA_REQ_BUFS_MAX		8
#define SW_Y_STAT_RD_ID_MASK		GENMASK(5, 4)
#define SW_Y_STAT_RD_BLOCK_MASK		GENMASK(7, 6)
#define SW_Y_STAT_EN			BIT(0)
#define SW_Y_STAT_RD_EN			BIT(3)
#define SW_Y_STAT_BAYER_TYPE(a)		(((a) & 0x3) << 1)
#define SW_Y_STAT_RD_ID(a)		(((a) & 0x3) << 4)
#define SW_Y_STAT_RD_BLOCK(a)		(((a) & 0x3) << 6)

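/*
 * Metadata format handling: the luma node exposes a single fixed
 * metadata format, so enum/g/s/try_fmt all report the same dataformat
 * and buffersize.
 */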
static int rkcif_luma_enum_fmt_meta_cap(struct file *file, void *priv,
					struct v4l2_fmtdesc *f)
{
	struct video_device *video = video_devdata(file);
	struct rkcif_luma_vdev *luma_vdev = video_get_drvdata(video);

	if (f->index > 0 || f->type != video->queue->type)
		return -EINVAL;

	f->pixelformat = luma_vdev->vdev_fmt.fmt.meta.dataformat;
	return 0;
}

static int rkcif_luma_g_fmt_meta_cap(struct file *file, void *priv,
				     struct v4l2_format *f)
{
	struct video_device *video = video_devdata(file);
	struct rkcif_luma_vdev *luma_vdev = video_get_drvdata(video);
	struct v4l2_meta_format *meta = &f->fmt.meta;

	if (f->type != video->queue->type)
		return -EINVAL;

	memset(meta, 0, sizeof(*meta));
	meta->dataformat = luma_vdev->vdev_fmt.fmt.meta.dataformat;
	meta->buffersize = luma_vdev->vdev_fmt.fmt.meta.buffersize;

	return 0;
}

static int rkcif_luma_querycap(struct file *file,
			       void *priv, struct v4l2_capability *cap)
{
	struct video_device *vdev = video_devdata(file);
	struct rkcif_luma_vdev *luma_vdev = video_get_drvdata(vdev);
	struct device *dev = luma_vdev->cifdev->dev;

	strlcpy(cap->driver, dev->driver->name, sizeof(cap->driver));
	strlcpy(cap->card, dev->driver->name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info),
		 "platform:%s", dev_name(dev));

	return 0;
}

/* CIF luma video device IOCTLs */
static const struct v4l2_ioctl_ops rkcif_luma_ioctl = {
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_enum_fmt_meta_cap = rkcif_luma_enum_fmt_meta_cap,
	.vidioc_g_fmt_meta_cap = rkcif_luma_g_fmt_meta_cap,
	.vidioc_s_fmt_meta_cap = rkcif_luma_g_fmt_meta_cap,
	.vidioc_try_fmt_meta_cap = rkcif_luma_g_fmt_meta_cap,
	.vidioc_querycap = rkcif_luma_querycap
};

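/*
 * File open/release wrap the vb2 helpers so that the media pipeline
 * power state is acquired on open and released on close.
 */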
static int rkcif_luma_fh_open(struct file *filp)
{
	struct rkcif_luma_vdev *params = video_drvdata(filp);
	int ret;

	ret = v4l2_fh_open(filp);
	if (!ret) {
		ret = v4l2_pipeline_pm_get(&params->vnode.vdev.entity);
		if (ret < 0)
			vb2_fop_release(filp);
	}

	return ret;
}

static int rkcif_luma_fop_release(struct file *file)
{
	struct rkcif_luma_vdev *luma = video_drvdata(file);
	int ret;

	ret = vb2_fop_release(file);
	if (!ret)
		v4l2_pipeline_pm_put(&luma->vnode.vdev.entity);
	return ret;
}

struct v4l2_file_operations rkcif_luma_fops = {
	.mmap = vb2_fop_mmap,
	.unlocked_ioctl = video_ioctl2,
	.poll = vb2_fop_poll,
	.open = rkcif_luma_fh_open,
	.release = rkcif_luma_fop_release
};

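/*
 * vb2 queue setup: one plane per buffer, sized for a single
 * struct rkisp_isp2x_luma_buffer, with the buffer count clamped to
 * [RKCIF_LUMA_REQ_BUFS_MIN, RKCIF_LUMA_REQ_BUFS_MAX].
 */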
static int rkcif_luma_vb2_queue_setup(struct vb2_queue *vq,
				      unsigned int *num_buffers,
				      unsigned int *num_planes,
				      unsigned int sizes[],
				      struct device *alloc_ctxs[])
{
	struct rkcif_luma_vdev *luma_vdev = vq->drv_priv;

	*num_planes = 1;

	*num_buffers = clamp_t(u32, *num_buffers, RKCIF_LUMA_REQ_BUFS_MIN,
			       RKCIF_LUMA_REQ_BUFS_MAX);

	sizes[0] = sizeof(struct rkisp_isp2x_luma_buffer);

	INIT_LIST_HEAD(&luma_vdev->stat);

	return 0;
}

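/* Cache the plane vaddr and queue the buffer for the next statistics readout */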
static void rkcif_luma_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rkcif_buffer *luma_buf = to_rkcif_buffer(vbuf);
	struct vb2_queue *vq = vb->vb2_queue;
	struct rkcif_luma_vdev *luma_dev = vq->drv_priv;

	luma_buf->vaddr[0] = vb2_plane_vaddr(vb, 0);

	spin_lock_bh(&luma_dev->rd_lock);
	list_add_tail(&luma_buf->queue, &luma_dev->stat);
	spin_unlock_bh(&luma_dev->rd_lock);
}

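/*
 * Stop streaming: prevent the ISR from queueing new readout work,
 * disable the readout tasklet and return all pending buffers as errored.
 */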
static void rkcif_luma_vb2_stop_streaming(struct vb2_queue *vq)
{
	struct rkcif_luma_vdev *luma_vdev = vq->drv_priv;
	struct rkcif_buffer *buf;
	unsigned long flags;
	int i;

	/* Make sure no new work queued in isr before draining wq */
	spin_lock_irqsave(&luma_vdev->irq_lock, flags);
	luma_vdev->streamon = false;
	spin_unlock_irqrestore(&luma_vdev->irq_lock, flags);

	tasklet_disable(&luma_vdev->rd_tasklet);

	spin_lock_bh(&luma_vdev->rd_lock);
	for (i = 0; i < RKCIF_LUMA_REQ_BUFS_MAX; i++) {
		if (list_empty(&luma_vdev->stat))
			break;
		buf = list_first_entry(&luma_vdev->stat,
				       struct rkcif_buffer, queue);
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_bh(&luma_vdev->rd_lock);
}

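/* Start streaming: clear the per-channel read flags and arm the readout path */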
static int
rkcif_luma_vb2_start_streaming(struct vb2_queue *queue,
			       unsigned int count)
{
	struct rkcif_luma_vdev *luma_vdev = queue->drv_priv;
	u32 i;

	for (i = 0; i < RKCIF_RAW_MAX; i++)
		luma_vdev->ystat_rdflg[i] = false;

	luma_vdev->streamon = true;
	kfifo_reset(&luma_vdev->rd_kfifo);
	tasklet_enable(&luma_vdev->rd_tasklet);

	return 0;
}

static struct vb2_ops rkcif_luma_vb2_ops = {
	.queue_setup = rkcif_luma_vb2_queue_setup,
	.buf_queue = rkcif_luma_vb2_buf_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.stop_streaming = rkcif_luma_vb2_stop_streaming,
	.start_streaming = rkcif_luma_vb2_start_streaming,
};

static int rkcif_luma_init_vb2_queue(struct vb2_queue *q,
				     struct rkcif_luma_vdev *luma_vdev)
{
	q->type = V4L2_BUF_TYPE_META_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR;
	q->drv_priv = luma_vdev;
	q->ops = &rkcif_luma_vb2_ops;
	q->mem_ops = &vb2_vmalloc_memops;
	q->buf_struct_size = sizeof(struct rkcif_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &luma_vdev->vnode.vlock;
	q->dev = luma_vdev->cifdev->dev;

	return vb2_queue_init(q);
}

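/*
 * Copy one readout work item into the next available stat buffer and
 * return the buffer to userspace via vb2.
 */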
static void
rkcif_stats_send_luma(struct rkcif_luma_vdev *vdev,
		      struct rkcif_luma_readout_work *work)
{
	unsigned int cur_frame_id;
	struct rkisp_isp2x_luma_buffer *cur_stat_buf;
	struct rkcif_buffer *cur_buf = NULL;
	u32 i, j;

	spin_lock(&vdev->rd_lock);
	/* get one empty buffer */
	if (!list_empty(&vdev->stat)) {
		cur_buf = list_first_entry(&vdev->stat,
					   struct rkcif_buffer, queue);
		list_del(&cur_buf->queue);
	}
	spin_unlock(&vdev->rd_lock);

	if (!cur_buf) {
		v4l2_warn(vdev->vnode.vdev.v4l2_dev,
			  "no luma buffer available\n");
		return;
	}

	cur_stat_buf =
		(struct rkisp_isp2x_luma_buffer *)(cur_buf->vaddr[0]);
	if (!cur_stat_buf) {
		v4l2_err(vdev->vnode.vdev.v4l2_dev,
			 "cur_stat_buf is NULL\n");
		return;
	}

	cur_stat_buf->frame_id = work->frame_id;
	cur_stat_buf->meas_type = work->meas_type;
	for (i = 0; i < RKCIF_RAW_MAX; i++) {
		for (j = 0; j < ISP2X_MIPI_LUMA_MEAN_MAX; j++) {
			cur_stat_buf->luma[i].exp_mean[j] =
				work->luma[i].exp_mean[j];
		}
	}

	cur_frame_id = cur_stat_buf->frame_id;
	vb2_set_plane_payload(&cur_buf->vb.vb2_buf, 0,
			      sizeof(struct rkisp_isp2x_luma_buffer));
	cur_buf->vb.sequence = cur_frame_id;
	cur_buf->vb.vb2_buf.timestamp = work->timestamp;
	vb2_buffer_done(&cur_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}

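/* Tasklet context: drain readout work items queued by the ISR */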
static void rkcif_luma_readout_task(unsigned long data)
{
	unsigned int out = 0;
	struct rkcif_luma_readout_work work;
	struct rkcif_luma_vdev *vdev =
		(struct rkcif_luma_vdev *)data;

	while (!kfifo_is_empty(&vdev->rd_kfifo)) {
		out = kfifo_out(&vdev->rd_kfifo,
				&work, sizeof(work));
		if (!out)
			break;

		if (work.readout == RKCIF_READOUT_LUMA)
			rkcif_stats_send_luma(vdev, &work);
	}
}

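/*
 * Interrupt handler: latch the Y statistics mean values for the MIPI
 * channel that signalled this frame. Once every channel required by the
 * current HDR mode (one, two or three frames) has been read, push a
 * readout work item to the kfifo and schedule the tasklet.
 */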
void rkcif_luma_isr(struct rkcif_luma_vdev *luma_vdev, int mipi_id, u32 frame_id)
{
	u8 hdr_mode = luma_vdev->cifdev->hdr.hdr_mode;
	enum rkcif_luma_frm_mode frm_mode;
	bool send_task;
	u32 i, value;

	spin_lock(&luma_vdev->irq_lock);
	if (!luma_vdev->streamon)
		goto unlock;

	switch (hdr_mode) {
	case NO_HDR:
		frm_mode = RKCIF_LUMA_ONEFRM;
		break;
	case HDR_X2:
		frm_mode = RKCIF_LUMA_TWOFRM;
		break;
	case HDR_X3:
		frm_mode = RKCIF_LUMA_THREEFRM;
		break;
	default:
		goto unlock;
	}

	if (mipi_id == RKCIF_STREAM_MIPI_ID0 && !luma_vdev->ystat_rdflg[0]) {
		value = rkcif_read_register(luma_vdev->cifdev, CIF_REG_Y_STAT_CONTROL);
		value &= ~(SW_Y_STAT_RD_ID_MASK | SW_Y_STAT_RD_BLOCK_MASK);
		value |= SW_Y_STAT_RD_ID(0x0) | SW_Y_STAT_RD_EN;
		rkcif_write_register(luma_vdev->cifdev, CIF_REG_Y_STAT_CONTROL, value);
		for (i = 0; i < ISP2X_MIPI_LUMA_MEAN_MAX; i++)
			luma_vdev->work.luma[0].exp_mean[i] =
				rkcif_read_register(luma_vdev->cifdev, CIF_REG_Y_STAT_VALUE);

		luma_vdev->ystat_rdflg[0] = true;
	}
	if (mipi_id == RKCIF_STREAM_MIPI_ID1 && !luma_vdev->ystat_rdflg[1]) {
		value = rkcif_read_register(luma_vdev->cifdev, CIF_REG_Y_STAT_CONTROL);
		value &= ~(SW_Y_STAT_RD_ID_MASK | SW_Y_STAT_RD_BLOCK_MASK);
		value |= SW_Y_STAT_RD_ID(0x1) | SW_Y_STAT_RD_EN;
		rkcif_write_register(luma_vdev->cifdev, CIF_REG_Y_STAT_CONTROL, value);
		for (i = 0; i < ISP2X_MIPI_LUMA_MEAN_MAX; i++)
			luma_vdev->work.luma[1].exp_mean[i] =
				rkcif_read_register(luma_vdev->cifdev, CIF_REG_Y_STAT_VALUE);

		luma_vdev->ystat_rdflg[1] = true;
	}
	if (mipi_id == RKCIF_STREAM_MIPI_ID2 && !luma_vdev->ystat_rdflg[2]) {
		value = rkcif_read_register(luma_vdev->cifdev, CIF_REG_Y_STAT_CONTROL);
		value &= ~(SW_Y_STAT_RD_ID_MASK | SW_Y_STAT_RD_BLOCK_MASK);
		value |= SW_Y_STAT_RD_ID(0x2) | SW_Y_STAT_RD_EN;
		rkcif_write_register(luma_vdev->cifdev, CIF_REG_Y_STAT_CONTROL, value);
		for (i = 0; i < ISP2X_MIPI_LUMA_MEAN_MAX; i++)
			luma_vdev->work.luma[2].exp_mean[i] =
				rkcif_read_register(luma_vdev->cifdev, CIF_REG_Y_STAT_VALUE);

		luma_vdev->ystat_rdflg[2] = true;
	}

	send_task = false;
	if (frm_mode == RKCIF_LUMA_THREEFRM) {
		if (luma_vdev->ystat_rdflg[0] && luma_vdev->ystat_rdflg[1] &&
		    luma_vdev->ystat_rdflg[2])
			send_task = true;
	} else if (frm_mode == RKCIF_LUMA_TWOFRM) {
		if (luma_vdev->ystat_rdflg[0] && luma_vdev->ystat_rdflg[1])
			send_task = true;
	} else {
		if (luma_vdev->ystat_rdflg[0])
			send_task = true;
	}

	if (send_task) {
		luma_vdev->work.readout = RKCIF_READOUT_LUMA;
		luma_vdev->work.timestamp = ktime_get_ns();
		luma_vdev->work.frame_id = frame_id;

		if (frm_mode == RKCIF_LUMA_THREEFRM)
			luma_vdev->work.meas_type = ISP2X_RAW0_Y_STATE | ISP2X_RAW1_Y_STATE |
						    ISP2X_RAW2_Y_STATE;
		else if (frm_mode == RKCIF_LUMA_TWOFRM)
			luma_vdev->work.meas_type = ISP2X_RAW0_Y_STATE | ISP2X_RAW1_Y_STATE;
		else
			luma_vdev->work.meas_type = ISP2X_RAW0_Y_STATE;

		if (!kfifo_is_full(&luma_vdev->rd_kfifo))
			kfifo_in(&luma_vdev->rd_kfifo,
				 &luma_vdev->work, sizeof(luma_vdev->work));
		else
			v4l2_err(luma_vdev->vnode.vdev.v4l2_dev,
				 "stats kfifo is full\n");

		tasklet_schedule(&luma_vdev->rd_tasklet);

		for (i = 0; i < RKCIF_RAW_MAX; i++)
			luma_vdev->ystat_rdflg[i] = false;

		memset(&luma_vdev->work, 0, sizeof(luma_vdev->work));
	}

unlock:
	spin_unlock(&luma_vdev->irq_lock);
}

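/*
 * Enable the Y statistics block for raw input formats; the Bayer pattern
 * field is derived from the input media bus code.
 */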
void rkcif_start_luma(struct rkcif_luma_vdev *luma_vdev, const struct cif_input_fmt *cif_fmt_in)
{
	u32 bayer = 0;

	if (cif_fmt_in->fmt_type != CIF_FMT_TYPE_RAW)
		return;

	switch (cif_fmt_in->mbus_code) {
	case MEDIA_BUS_FMT_SBGGR8_1X8:
	case MEDIA_BUS_FMT_SBGGR10_1X10:
	case MEDIA_BUS_FMT_SBGGR12_1X12:
		bayer = 3;
		break;
	case MEDIA_BUS_FMT_SGBRG8_1X8:
	case MEDIA_BUS_FMT_SGBRG10_1X10:
	case MEDIA_BUS_FMT_SGBRG12_1X12:
		bayer = 2;
		break;
	case MEDIA_BUS_FMT_SGRBG8_1X8:
	case MEDIA_BUS_FMT_SGRBG10_1X10:
	case MEDIA_BUS_FMT_SGRBG12_1X12:
		bayer = 1;
		break;
	case MEDIA_BUS_FMT_SRGGB8_1X8:
	case MEDIA_BUS_FMT_SRGGB10_1X10:
	case MEDIA_BUS_FMT_SRGGB12_1X12:
		bayer = 0;
		break;
	}

	rkcif_write_register(luma_vdev->cifdev, CIF_REG_Y_STAT_CONTROL,
			     SW_Y_STAT_BAYER_TYPE(bayer) | SW_Y_STAT_EN);
	luma_vdev->enable = true;
}

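/* Disable the Y statistics block */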
void rkcif_stop_luma(struct rkcif_luma_vdev *luma_vdev)
{
	rkcif_write_register(luma_vdev->cifdev, CIF_REG_Y_STAT_CONTROL, 0x0);
	luma_vdev->enable = false;
}

static void rkcif_init_luma_vdev(struct rkcif_luma_vdev *luma_vdev)
{
	luma_vdev->vdev_fmt.fmt.meta.dataformat =
		V4L2_META_FMT_RK_ISP1_STAT_LUMA;
	luma_vdev->vdev_fmt.fmt.meta.buffersize =
		sizeof(struct rkisp_isp2x_luma_buffer);
}

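/*
 * Register the "rkcif-mipi-luma" metadata capture video device and set up
 * its vb2 queue, media pad, readout kfifo and tasklet.
 */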
int rkcif_register_luma_vdev(struct rkcif_luma_vdev *luma_vdev,
			     struct v4l2_device *v4l2_dev,
			     struct rkcif_device *dev)
{
	int ret;
	struct rkcif_luma_node *node = &luma_vdev->vnode;
	struct video_device *vdev = &node->vdev;

	luma_vdev->cifdev = dev;

	INIT_LIST_HEAD(&luma_vdev->stat);
	spin_lock_init(&luma_vdev->irq_lock);
	spin_lock_init(&luma_vdev->rd_lock);

	strlcpy(vdev->name, "rkcif-mipi-luma", sizeof(vdev->name));
	mutex_init(&node->vlock);

	vdev->ioctl_ops = &rkcif_luma_ioctl;
	vdev->fops = &rkcif_luma_fops;
	vdev->release = video_device_release_empty;
	vdev->lock = &node->vlock;
	vdev->v4l2_dev = v4l2_dev;
	vdev->queue = &node->buf_queue;
	vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
	vdev->vfl_dir = VFL_DIR_RX;
	rkcif_luma_init_vb2_queue(vdev->queue, luma_vdev);
	rkcif_init_luma_vdev(luma_vdev);
	video_set_drvdata(vdev, luma_vdev);

	node->pad.flags = MEDIA_PAD_FL_SINK;
	ret = media_entity_pads_init(&vdev->entity, 0, &node->pad);
	if (ret < 0)
		goto err_release_queue;

	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret < 0) {
		dev_err(&vdev->dev,
			"could not register Video for Linux device\n");
		goto err_cleanup_media_entity;
	}

	ret = kfifo_alloc(&luma_vdev->rd_kfifo,
			  RKCIF_LUMA_READOUT_WORK_SIZE,
			  GFP_KERNEL);
	if (ret) {
		dev_err(&vdev->dev,
			"kfifo_alloc failed with error %d\n",
			ret);
		goto err_unregister_video;
	}

	tasklet_init(&luma_vdev->rd_tasklet,
		     rkcif_luma_readout_task,
		     (unsigned long)luma_vdev);
	tasklet_disable(&luma_vdev->rd_tasklet);

	return 0;

err_unregister_video:
	video_unregister_device(vdev);
err_cleanup_media_entity:
	media_entity_cleanup(&vdev->entity);
err_release_queue:
	vb2_queue_release(vdev->queue);
	return ret;
}

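/* Undo everything done in rkcif_register_luma_vdev() */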
void rkcif_unregister_luma_vdev(struct rkcif_luma_vdev *luma_vdev)
{
	struct rkcif_luma_node *node = &luma_vdev->vnode;
	struct video_device *vdev = &node->vdev;

	kfifo_free(&luma_vdev->rd_kfifo);
	tasklet_kill(&luma_vdev->rd_tasklet);
	video_unregister_device(vdev);
	media_entity_cleanup(&vdev->entity);
	vb2_queue_release(vdev->queue);
}