• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Driver for STM32 Digital Camera Memory Interface
3  *
4  * Copyright (C) STMicroelectronics SA 2017
5  * Authors: Yannick Fertre <yannick.fertre@st.com>
6  *          Hugues Fruchet <hugues.fruchet@st.com>
7  *          for STMicroelectronics.
8  * License terms:  GNU General Public License (GPL), version 2
9  *
10  * This driver is based on atmel_isi.c
11  *
12  */
13 
14 #include <linux/clk.h>
15 #include <linux/completion.h>
16 #include <linux/delay.h>
17 #include <linux/dmaengine.h>
18 #include <linux/init.h>
19 #include <linux/interrupt.h>
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/of.h>
23 #include <linux/of_device.h>
24 #include <linux/of_graph.h>
25 #include <linux/platform_device.h>
26 #include <linux/reset.h>
27 #include <linux/videodev2.h>
28 
29 #include <media/v4l2-ctrls.h>
30 #include <media/v4l2-dev.h>
31 #include <media/v4l2-device.h>
32 #include <media/v4l2-event.h>
33 #include <media/v4l2-fwnode.h>
34 #include <media/v4l2-image-sizes.h>
35 #include <media/v4l2-ioctl.h>
36 #include <media/v4l2-rect.h>
37 #include <media/videobuf2-dma-contig.h>
38 
39 #define DRV_NAME "stm32-dcmi"
40 
41 /* Registers offset for DCMI */
42 #define DCMI_CR		0x00 /* Control Register */
43 #define DCMI_SR		0x04 /* Status Register */
44 #define DCMI_RIS	0x08 /* Raw Interrupt Status register */
45 #define DCMI_IER	0x0C /* Interrupt Enable Register */
46 #define DCMI_MIS	0x10 /* Masked Interrupt Status register */
47 #define DCMI_ICR	0x14 /* Interrupt Clear Register */
48 #define DCMI_ESCR	0x18 /* Embedded Synchronization Code Register */
49 #define DCMI_ESUR	0x1C /* Embedded Synchronization Unmask Register */
50 #define DCMI_CWSTRT	0x20 /* Crop Window STaRT */
51 #define DCMI_CWSIZE	0x24 /* Crop Window SIZE */
52 #define DCMI_DR		0x28 /* Data Register */
53 #define DCMI_IDR	0x2C /* IDentifier Register */
54 
55 /* Bits definition for control register (DCMI_CR) */
56 #define CR_CAPTURE	BIT(0)
57 #define CR_CM		BIT(1)
58 #define CR_CROP		BIT(2)
59 #define CR_JPEG		BIT(3)
60 #define CR_ESS		BIT(4)
61 #define CR_PCKPOL	BIT(5)
62 #define CR_HSPOL	BIT(6)
63 #define CR_VSPOL	BIT(7)
64 #define CR_FCRC_0	BIT(8)
65 #define CR_FCRC_1	BIT(9)
66 #define CR_EDM_0	BIT(10)
67 #define CR_EDM_1	BIT(11)
68 #define CR_ENABLE	BIT(14)
69 
70 /* Bits definition for status register (DCMI_SR) */
71 #define SR_HSYNC	BIT(0)
72 #define SR_VSYNC	BIT(1)
73 #define SR_FNE		BIT(2)
74 
75 /*
76  * Bits definition for interrupt registers
77  * (DCMI_RIS, DCMI_IER, DCMI_MIS, DCMI_ICR)
78  */
79 #define IT_FRAME	BIT(0)
80 #define IT_OVR		BIT(1)
81 #define IT_ERR		BIT(2)
82 #define IT_VSYNC	BIT(3)
83 #define IT_LINE		BIT(4)
84 
/* DCMI capture state; transitions driven by streaming ops and IRQ thread */
enum state {
	STOPPED = 0,	/* not capturing */
	RUNNING,	/* capture in progress */
	STOPPING,	/* stop requested, waiting for IRQ thread to ack */
};
90 
91 #define MIN_WIDTH	16U
92 #define MAX_WIDTH	2048U
93 #define MIN_HEIGHT	16U
94 #define MAX_HEIGHT	2048U
95 
96 #define TIMEOUT_MS	1000
97 
/* Remote (sensor) entity discovered through the OF graph */
struct dcmi_graph_entity {
	struct device_node *node;	/* DT node of the remote entity */

	struct v4l2_async_subdev asd;	/* async match descriptor */
	struct v4l2_subdev *subdev;	/* bound subdev, set at bind time */
};
104 
/* Pixel format supported by both DCMI and the sensor */
struct dcmi_format {
	u32	fourcc;		/* V4L2 pixel format (V4L2_PIX_FMT_*) */
	u32	mbus_code;	/* matching media bus code (MEDIA_BUS_FMT_*) */
	u8	bpp;		/* bytes per pixel (used for bytesperline) */
};
110 
/* Discrete frame size advertised by the sensor */
struct dcmi_framesize {
	u32	width;
	u32	height;
};
115 
/* Driver-private capture buffer wrapping a vb2 buffer */
struct dcmi_buf {
	struct vb2_v4l2_buffer	vb;		/* must be first (container_of) */
	bool			prepared;	/* DMA address/size cached once */
	dma_addr_t		paddr;		/* DMA address of plane 0 */
	size_t			size;		/* plane 0 size in bytes */
	struct list_head	list;		/* entry in stm32_dcmi::buffers */
};
123 
/* Per-device state of the STM32 DCMI camera interface */
struct stm32_dcmi {
	/* Protects the access of variables shared within the interrupt */
	spinlock_t			irqlock;
	struct device			*dev;
	void __iomem			*regs;		/* mapped DCMI registers */
	struct resource			*res;		/* MMIO resource (phys base for DMA) */
	struct reset_control		*rstc;
	int				sequence;	/* frame sequence counter */
	struct list_head		buffers;	/* queued buffers awaiting capture */
	struct dcmi_buf			*active;	/* buffer currently being filled by DMA */

	struct v4l2_device		v4l2_dev;
	struct video_device		*vdev;
	struct v4l2_async_notifier	notifier;
	struct dcmi_graph_entity	entity;		/* connected sensor subdev */
	struct v4l2_format		fmt;		/* current capture format */
	struct v4l2_rect		crop;		/* active crop rectangle */
	bool				do_crop;	/* true when crop differs from bounds */

	const struct dcmi_format	**sd_formats;	/* formats negotiated with sensor */
	unsigned int			num_of_sd_formats;
	const struct dcmi_format	*sd_format;	/* currently selected format */
	struct dcmi_framesize		*sd_framesizes;	/* discrete sensor frame sizes */
	unsigned int			num_of_sd_framesizes;
	struct dcmi_framesize		sd_framesize;	/* currently selected frame size */
	struct v4l2_rect		sd_bounds;	/* sensor crop bounds */

	/* Protect this data structure */
	struct mutex			lock;
	struct vb2_queue		queue;

	struct v4l2_fwnode_bus_parallel	bus;		/* parallel bus config from DT */
	struct completion		complete;	/* signaled when capture is stopped */
	struct clk			*mclk;
	enum state			state;
	struct dma_chan			*dma_chan;
	dma_cookie_t			dma_cookie;	/* cookie of last submitted DMA tx */
	u32				misr;		/* masked IRQ status latched in hard IRQ */
	int				errors_count;
	int				buffers_count;

	/* Ensure DMA operations atomicity */
	struct mutex			dma_lock;
};
168 
/* Retrieve the stm32_dcmi instance embedding the given async notifier. */
static inline struct stm32_dcmi *notifier_to_dcmi(struct v4l2_async_notifier *n)
{
	return container_of(n, struct stm32_dcmi, notifier);
}
173 
/* Read a 32-bit DCMI register at offset @reg (relaxed, no barrier). */
static inline u32 reg_read(void __iomem *base, u32 reg)
{
	return readl_relaxed(base + reg);
}
178 
/* Write a 32-bit DCMI register at offset @reg (relaxed, no barrier). */
static inline void reg_write(void __iomem *base, u32 reg, u32 val)
{
	writel_relaxed(val, base + reg);
}
183 
/* Set the bits of @mask in register @reg (non-atomic read-modify-write). */
static inline void reg_set(void __iomem *base, u32 reg, u32 mask)
{
	u32 val = reg_read(base, reg);

	reg_write(base, reg, val | mask);
}
188 
/* Clear the bits of @mask in register @reg (non-atomic read-modify-write). */
static inline void reg_clear(void __iomem *base, u32 reg, u32 mask)
{
	u32 val = reg_read(base, reg);

	reg_write(base, reg, val & ~mask);
}
193 
194 static int dcmi_start_capture(struct stm32_dcmi *dcmi);
195 
/*
 * DMA engine completion callback.
 *
 * On DMA_COMPLETE, returns the active buffer to vb2 and, while streaming
 * is RUNNING, restarts capture on the next queued buffer.
 */
static void dcmi_dma_callback(void *param)
{
	struct stm32_dcmi *dcmi = (struct stm32_dcmi *)param;
	struct dma_chan *chan = dcmi->dma_chan;
	struct dma_tx_state state;
	enum dma_status status;

	/* Serialize against the DCMI interrupt handlers */
	spin_lock(&dcmi->irqlock);

	/* Check DMA status */
	status = dmaengine_tx_status(chan, dcmi->dma_cookie, &state);

	switch (status) {
	case DMA_IN_PROGRESS:
		dev_dbg(dcmi->dev, "%s: Received DMA_IN_PROGRESS\n", __func__);
		break;
	case DMA_PAUSED:
		dev_err(dcmi->dev, "%s: Received DMA_PAUSED\n", __func__);
		break;
	case DMA_ERROR:
		dev_err(dcmi->dev, "%s: Received DMA_ERROR\n", __func__);
		break;
	case DMA_COMPLETE:
		dev_dbg(dcmi->dev, "%s: Received DMA_COMPLETE\n", __func__);

		if (dcmi->active) {
			struct dcmi_buf *buf = dcmi->active;
			struct vb2_v4l2_buffer *vbuf = &dcmi->active->vb;

			/* Fill buffer metadata and hand it back to vb2 */
			vbuf->sequence = dcmi->sequence++;
			vbuf->field = V4L2_FIELD_NONE;
			vbuf->vb2_buf.timestamp = ktime_get_ns();
			vb2_set_plane_payload(&vbuf->vb2_buf, 0, buf->size);
			vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
			dev_dbg(dcmi->dev, "buffer[%d] done seq=%d\n",
				vbuf->vb2_buf.index, vbuf->sequence);

			dcmi->buffers_count++;
			dcmi->active = NULL;
		}

		/* Restart a new DMA transfer with next buffer */
		if (dcmi->state == RUNNING) {
			if (list_empty(&dcmi->buffers)) {
				/* Underrun: userspace did not queue buffers fast enough */
				dev_err(dcmi->dev, "%s: No more buffer queued, cannot capture buffer",
					__func__);
				dcmi->errors_count++;
				dcmi->active = NULL;

				spin_unlock(&dcmi->irqlock);
				return;
			}

			/* Pop the oldest queued buffer and make it active */
			dcmi->active = list_entry(dcmi->buffers.next,
						  struct dcmi_buf, list);

			list_del_init(&dcmi->active->list);

			if (dcmi_start_capture(dcmi)) {
				dev_err(dcmi->dev, "%s: Cannot restart capture on DMA complete",
					__func__);

				spin_unlock(&dcmi->irqlock);
				return;
			}

			/* Enable capture */
			reg_set(dcmi->regs, DCMI_CR, CR_CAPTURE);
		}

		break;
	default:
		dev_err(dcmi->dev, "%s: Received unknown status\n", __func__);
		break;
	}

	spin_unlock(&dcmi->irqlock);
}
274 
/*
 * Configure, prepare and submit one DMA transfer from the DCMI data
 * register into @buf, then kick the DMA engine.
 *
 * Returns 0 on success or a negative error code.
 */
static int dcmi_start_dma(struct stm32_dcmi *dcmi,
			  struct dcmi_buf *buf)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_slave_config config;
	int ret;

	memset(&config, 0, sizeof(config));

	/* Source is the physical address of the DCMI data register */
	config.src_addr = (dma_addr_t)dcmi->res->start + DCMI_DR;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_maxburst = 4;

	/* Configure DMA channel */
	ret = dmaengine_slave_config(dcmi->dma_chan, &config);
	if (ret < 0) {
		dev_err(dcmi->dev, "%s: DMA channel config failed (%d)\n",
			__func__, ret);
		return ret;
	}

	/*
	 * Avoid call of dmaengine_terminate_all() between
	 * dmaengine_prep_slave_single() and dmaengine_submit()
	 * by locking the whole DMA submission sequence
	 */
	mutex_lock(&dcmi->dma_lock);

	/* Prepare a DMA transaction */
	desc = dmaengine_prep_slave_single(dcmi->dma_chan, buf->paddr,
					   buf->size,
					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer size %zu\n",
			__func__, buf->size);
		mutex_unlock(&dcmi->dma_lock);
		return -EINVAL;
	}

	/* Set completion callback routine for notification */
	desc->callback = dcmi_dma_callback;
	desc->callback_param = dcmi;

	/* Push current DMA transaction in the pending queue */
	dcmi->dma_cookie = dmaengine_submit(desc);
	if (dma_submit_error(dcmi->dma_cookie)) {
		dev_err(dcmi->dev, "%s: DMA submission failed\n", __func__);
		mutex_unlock(&dcmi->dma_lock);
		return -ENXIO;
	}

	mutex_unlock(&dcmi->dma_lock);

	dma_async_issue_pending(dcmi->dma_chan);

	return 0;
}
333 
dcmi_start_capture(struct stm32_dcmi * dcmi)334 static int dcmi_start_capture(struct stm32_dcmi *dcmi)
335 {
336 	int ret;
337 	struct dcmi_buf *buf = dcmi->active;
338 
339 	if (!buf)
340 		return -EINVAL;
341 
342 	ret = dcmi_start_dma(dcmi, buf);
343 	if (ret) {
344 		dcmi->errors_count++;
345 		return ret;
346 	}
347 
348 	/* Enable capture */
349 	reg_set(dcmi->regs, DCMI_CR, CR_CAPTURE);
350 
351 	return 0;
352 }
353 
dcmi_set_crop(struct stm32_dcmi * dcmi)354 static void dcmi_set_crop(struct stm32_dcmi *dcmi)
355 {
356 	u32 size, start;
357 
358 	/* Crop resolution */
359 	size = ((dcmi->crop.height - 1) << 16) |
360 		((dcmi->crop.width << 1) - 1);
361 	reg_write(dcmi->regs, DCMI_CWSIZE, size);
362 
363 	/* Crop start point */
364 	start = ((dcmi->crop.top) << 16) |
365 		 ((dcmi->crop.left << 1));
366 	reg_write(dcmi->regs, DCMI_CWSTRT, start);
367 
368 	dev_dbg(dcmi->dev, "Cropping to %ux%u@%u:%u\n",
369 		dcmi->crop.width, dcmi->crop.height,
370 		dcmi->crop.left, dcmi->crop.top);
371 
372 	/* Enable crop */
373 	reg_set(dcmi->regs, DCMI_CR, CR_CROP);
374 }
375 
/*
 * Threaded IRQ handler: acknowledges a stop request, or recovers from
 * an overflow/error by terminating the DMA and restarting capture.
 * Works on dcmi->misr latched by dcmi_irq_callback().
 */
static irqreturn_t dcmi_irq_thread(int irq, void *arg)
{
	struct stm32_dcmi *dcmi = arg;

	spin_lock(&dcmi->irqlock);

	/* Stop capture is required */
	if (dcmi->state == STOPPING) {
		/* Mask further frame/overflow/error interrupts */
		reg_clear(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);

		dcmi->state = STOPPED;

		/* Wake up dcmi_stop_streaming() waiting on this ack */
		complete(&dcmi->complete);

		spin_unlock(&dcmi->irqlock);
		return IRQ_HANDLED;
	}

	if ((dcmi->misr & IT_OVR) || (dcmi->misr & IT_ERR)) {
		/*
		 * An overflow or an error has been detected,
		 * stop current DMA transfert & restart it
		 */
		dev_warn(dcmi->dev, "%s: Overflow or error detected\n",
			 __func__);

		dcmi->errors_count++;
		dmaengine_terminate_all(dcmi->dma_chan);

		/* Clear any pending frame/overflow/error interrupt */
		reg_set(dcmi->regs, DCMI_ICR, IT_FRAME | IT_OVR | IT_ERR);

		dev_dbg(dcmi->dev, "Restarting capture after DCMI error\n");

		if (dcmi_start_capture(dcmi)) {
			dev_err(dcmi->dev, "%s: Cannot restart capture on overflow or error\n",
				__func__);

			spin_unlock(&dcmi->irqlock);
			return IRQ_HANDLED;
		}
	}

	spin_unlock(&dcmi->irqlock);
	return IRQ_HANDLED;
}
421 
/*
 * Hard IRQ handler: latch the masked interrupt status for the threaded
 * handler, acknowledge the interrupts, then wake dcmi_irq_thread().
 */
static irqreturn_t dcmi_irq_callback(int irq, void *arg)
{
	struct stm32_dcmi *dcmi = arg;

	spin_lock(&dcmi->irqlock);

	/* Snapshot status before clearing, for use by the IRQ thread */
	dcmi->misr = reg_read(dcmi->regs, DCMI_MIS);

	/* Clear interrupt */
	reg_set(dcmi->regs, DCMI_ICR, IT_FRAME | IT_OVR | IT_ERR);

	spin_unlock(&dcmi->irqlock);

	return IRQ_WAKE_THREAD;
}
437 
dcmi_queue_setup(struct vb2_queue * vq,unsigned int * nbuffers,unsigned int * nplanes,unsigned int sizes[],struct device * alloc_devs[])438 static int dcmi_queue_setup(struct vb2_queue *vq,
439 			    unsigned int *nbuffers,
440 			    unsigned int *nplanes,
441 			    unsigned int sizes[],
442 			    struct device *alloc_devs[])
443 {
444 	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
445 	unsigned int size;
446 
447 	size = dcmi->fmt.fmt.pix.sizeimage;
448 
449 	/* Make sure the image size is large enough */
450 	if (*nplanes)
451 		return sizes[0] < size ? -EINVAL : 0;
452 
453 	*nplanes = 1;
454 	sizes[0] = size;
455 
456 	dcmi->active = NULL;
457 
458 	dev_dbg(dcmi->dev, "Setup queue, count=%d, size=%d\n",
459 		*nbuffers, size);
460 
461 	return 0;
462 }
463 
dcmi_buf_init(struct vb2_buffer * vb)464 static int dcmi_buf_init(struct vb2_buffer *vb)
465 {
466 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
467 	struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
468 
469 	INIT_LIST_HEAD(&buf->list);
470 
471 	return 0;
472 }
473 
/*
 * vb2 .buf_prepare: validate the plane size against the current format
 * and cache the buffer's DMA address/size on first preparation.
 */
static int dcmi_buf_prepare(struct vb2_buffer *vb)
{
	struct stm32_dcmi *dcmi =  vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
	unsigned long size;

	size = dcmi->fmt.fmt.pix.sizeimage;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(dcmi->dev, "%s data will not fit into plane (%lu < %lu)\n",
			__func__, vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);

	if (!buf->prepared) {
		/* Get memory addresses */
		buf->paddr =
			vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
		buf->size = vb2_plane_size(&buf->vb.vb2_buf, 0);
		buf->prepared = true;

		/*
		 * NOTE(review): this overrides the payload set above with the
		 * full plane size (which may exceed sizeimage) — looks
		 * redundant; confirm whether the full-plane payload is
		 * intentional before simplifying.
		 */
		vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);

		dev_dbg(dcmi->dev, "buffer[%d] phy=0x%pad size=%zu\n",
			vb->index, &buf->paddr, buf->size);
	}

	return 0;
}
506 
/*
 * vb2 .buf_queue: if streaming and no capture is in flight, start
 * capturing into this buffer immediately; otherwise append it to the
 * pending-buffers list for the DMA callback to pick up.
 */
static void dcmi_buf_queue(struct vb2_buffer *vb)
{
	struct stm32_dcmi *dcmi =  vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
	unsigned long flags = 0;

	spin_lock_irqsave(&dcmi->irqlock, flags);

	if ((dcmi->state == RUNNING) && (!dcmi->active)) {
		/* Capture was idle (deferred start or underrun): resume now */
		dcmi->active = buf;

		dev_dbg(dcmi->dev, "Starting capture on buffer[%d] queued\n",
			buf->vb.vb2_buf.index);

		if (dcmi_start_capture(dcmi)) {
			dev_err(dcmi->dev, "%s: Cannot restart capture on overflow or error\n",
				__func__);

			spin_unlock_irqrestore(&dcmi->irqlock, flags);
			return;
		}
	} else {
		/* Enqueue to video buffers list */
		list_add_tail(&buf->list, &dcmi->buffers);
	}

	spin_unlock_irqrestore(&dcmi->irqlock, flags);
}
536 
/*
 * vb2 .start_streaming: enable clock and sensor stream, program the
 * DCMI control register from the parallel-bus config, then start
 * capture on the first queued buffer (or defer until one is queued).
 *
 * On failure all queued buffers are returned to vb2 in QUEUED state.
 */
static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
	struct dcmi_buf *buf, *node;
	u32 val = 0;
	int ret;

	ret = clk_enable(dcmi->mclk);
	if (ret) {
		dev_err(dcmi->dev, "%s: Failed to start streaming, cannot enable clock",
			__func__);
		goto err_release_buffers;
	}

	/* Enable stream on the sub device */
	ret = v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 1);
	if (ret && ret != -ENOIOCTLCMD) {
		dev_err(dcmi->dev, "%s: Failed to start streaming, subdev streamon error",
			__func__);
		goto err_disable_clock;
	}

	spin_lock_irq(&dcmi->irqlock);

	/* Set bus width via the CR extended data mode bits */
	switch (dcmi->bus.bus_width) {
	case 14:
		val |= CR_EDM_0 | CR_EDM_1;
		break;
	case 12:
		val |= CR_EDM_1;
		break;
	case 10:
		val |= CR_EDM_0;
		break;
	default:
		/* Set bus width to 8 bits by default */
		break;
	}

	/* Set vertical synchronization polarity */
	if (dcmi->bus.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
		val |= CR_VSPOL;

	/* Set horizontal synchronization polarity */
	if (dcmi->bus.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
		val |= CR_HSPOL;

	/* Set pixel clock polarity */
	if (dcmi->bus.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
		val |= CR_PCKPOL;

	reg_write(dcmi->regs, DCMI_CR, val);

	/* Set crop */
	if (dcmi->do_crop)
		dcmi_set_crop(dcmi);

	/* Enable dcmi */
	reg_set(dcmi->regs, DCMI_CR, CR_ENABLE);

	dcmi->state = RUNNING;

	/* Reset per-stream counters */
	dcmi->sequence = 0;
	dcmi->errors_count = 0;
	dcmi->buffers_count = 0;
	dcmi->active = NULL;

	/*
	 * Start transfer if at least one buffer has been queued,
	 * otherwise transfer is deferred at buffer queueing
	 */
	if (list_empty(&dcmi->buffers)) {
		dev_dbg(dcmi->dev, "Start streaming is deferred to next buffer queueing\n");
		spin_unlock_irq(&dcmi->irqlock);
		return 0;
	}

	/* Pop the first queued buffer and make it active */
	dcmi->active = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
	list_del_init(&dcmi->active->list);

	dev_dbg(dcmi->dev, "Start streaming, starting capture\n");

	ret = dcmi_start_capture(dcmi);
	if (ret) {
		dev_err(dcmi->dev, "%s: Start streaming failed, cannot start capture",
			__func__);

		spin_unlock_irq(&dcmi->irqlock);
		goto err_subdev_streamoff;
	}

	/* Enable interruptions */
	reg_set(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);

	spin_unlock_irq(&dcmi->irqlock);

	return 0;

err_subdev_streamoff:
	v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);

err_disable_clock:
	clk_disable(dcmi->mclk);

err_release_buffers:
	spin_lock_irq(&dcmi->irqlock);
	/*
	 * Return all buffers to vb2 in QUEUED state.
	 * This will give ownership back to userspace
	 */
	if (dcmi->active) {
		buf = dcmi->active;
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
		dcmi->active = NULL;
	}
	list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
		list_del_init(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
	}
	spin_unlock_irq(&dcmi->irqlock);

	return ret;
}
661 
dcmi_stop_streaming(struct vb2_queue * vq)662 static void dcmi_stop_streaming(struct vb2_queue *vq)
663 {
664 	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
665 	struct dcmi_buf *buf, *node;
666 	unsigned long time_ms = msecs_to_jiffies(TIMEOUT_MS);
667 	long timeout;
668 	int ret;
669 
670 	/* Disable stream on the sub device */
671 	ret = v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);
672 	if (ret && ret != -ENOIOCTLCMD)
673 		dev_err(dcmi->dev, "stream off failed in subdev\n");
674 
675 	dcmi->state = STOPPING;
676 
677 	timeout = wait_for_completion_interruptible_timeout(&dcmi->complete,
678 							    time_ms);
679 
680 	spin_lock_irq(&dcmi->irqlock);
681 
682 	/* Disable interruptions */
683 	reg_clear(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
684 
685 	/* Disable DCMI */
686 	reg_clear(dcmi->regs, DCMI_CR, CR_ENABLE);
687 
688 	if (!timeout) {
689 		dev_err(dcmi->dev, "Timeout during stop streaming\n");
690 		dcmi->state = STOPPED;
691 	}
692 
693 	/* Return all queued buffers to vb2 in ERROR state */
694 	if (dcmi->active) {
695 		buf = dcmi->active;
696 		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
697 		dcmi->active = NULL;
698 	}
699 	list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
700 		list_del_init(&buf->list);
701 		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
702 	}
703 
704 	spin_unlock_irq(&dcmi->irqlock);
705 
706 	/* Stop all pending DMA operations */
707 	mutex_lock(&dcmi->dma_lock);
708 	dmaengine_terminate_all(dcmi->dma_chan);
709 	mutex_unlock(&dcmi->dma_lock);
710 
711 	clk_disable(dcmi->mclk);
712 
713 	dev_dbg(dcmi->dev, "Stop streaming, errors=%d buffers=%d\n",
714 		dcmi->errors_count, dcmi->buffers_count);
715 }
716 
/* videobuf2 queue operations for the DCMI capture queue */
static const struct vb2_ops dcmi_video_qops = {
	.queue_setup		= dcmi_queue_setup,
	.buf_init		= dcmi_buf_init,
	.buf_prepare		= dcmi_buf_prepare,
	.buf_queue		= dcmi_buf_queue,
	.start_streaming	= dcmi_start_streaming,
	.stop_streaming		= dcmi_stop_streaming,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};
727 
/* VIDIOC_G_FMT handler: report the currently configured capture format. */
static int dcmi_g_fmt_vid_cap(struct file *file, void *priv,
			      struct v4l2_format *fmt)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);

	*fmt = dcmi->fmt;

	return 0;
}
737 
find_format_by_fourcc(struct stm32_dcmi * dcmi,unsigned int fourcc)738 static const struct dcmi_format *find_format_by_fourcc(struct stm32_dcmi *dcmi,
739 						       unsigned int fourcc)
740 {
741 	unsigned int num_formats = dcmi->num_of_sd_formats;
742 	const struct dcmi_format *fmt;
743 	unsigned int i;
744 
745 	for (i = 0; i < num_formats; i++) {
746 		fmt = dcmi->sd_formats[i];
747 		if (fmt->fourcc == fourcc)
748 			return fmt;
749 	}
750 
751 	return NULL;
752 }
753 
__find_outer_frame_size(struct stm32_dcmi * dcmi,struct v4l2_pix_format * pix,struct dcmi_framesize * framesize)754 static void __find_outer_frame_size(struct stm32_dcmi *dcmi,
755 				    struct v4l2_pix_format *pix,
756 				    struct dcmi_framesize *framesize)
757 {
758 	struct dcmi_framesize *match = NULL;
759 	unsigned int i;
760 	unsigned int min_err = UINT_MAX;
761 
762 	for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
763 		struct dcmi_framesize *fsize = &dcmi->sd_framesizes[i];
764 		int w_err = (fsize->width - pix->width);
765 		int h_err = (fsize->height - pix->height);
766 		int err = w_err + h_err;
767 
768 		if ((w_err >= 0) && (h_err >= 0) && (err < min_err)) {
769 			min_err = err;
770 			match = fsize;
771 		}
772 	}
773 	if (!match)
774 		match = &dcmi->sd_framesizes[0];
775 
776 	*framesize = *match;
777 }
778 
/*
 * Negotiate a capture format: clamp the request to hardware limits,
 * let the sensor adjust it (TRY pad format), and apply the crop
 * rectangle when crop is active.
 *
 * On success, optionally returns the selected dcmi_format in
 * @sd_format and the sensor resolution in @sd_framesize.
 */
static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
			const struct dcmi_format **sd_format,
			struct dcmi_framesize *sd_framesize)
{
	const struct dcmi_format *sd_fmt;
	struct dcmi_framesize sd_fsize;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct v4l2_subdev_pad_config pad_cfg;
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_TRY,
	};
	int ret;

	sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
	if (!sd_fmt) {
		if (!dcmi->num_of_sd_formats)
			return -ENODATA;

		/* Unsupported fourcc: fall back to the last supported format */
		sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
		pix->pixelformat = sd_fmt->fourcc;
	}

	/* Limit to hardware capabilities */
	pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH);
	pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT);

	if (dcmi->do_crop && dcmi->num_of_sd_framesizes) {
		struct dcmi_framesize outer_sd_fsize;
		/*
		 * If crop is requested and sensor have discrete frame sizes,
		 * select the frame size that is just larger than request
		 */
		__find_outer_frame_size(dcmi, pix, &outer_sd_fsize);
		pix->width = outer_sd_fsize.width;
		pix->height = outer_sd_fsize.height;
	}

	/* Ask the sensor to try this format (does not change active config) */
	v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
	ret = v4l2_subdev_call(dcmi->entity.subdev, pad, set_fmt,
			       &pad_cfg, &format);
	if (ret < 0)
		return ret;

	/* Update pix regarding to what sensor can do */
	v4l2_fill_pix_format(pix, &format.format);

	/* Save resolution that sensor can actually do */
	sd_fsize.width = pix->width;
	sd_fsize.height = pix->height;

	if (dcmi->do_crop) {
		struct v4l2_rect c = dcmi->crop;
		struct v4l2_rect max_rect;

		/*
		 * Adjust crop by making the intersection between
		 * format resolution request and crop request
		 */
		max_rect.top = 0;
		max_rect.left = 0;
		max_rect.width = pix->width;
		max_rect.height = pix->height;
		v4l2_rect_map_inside(&c, &max_rect);
		c.top  = clamp_t(s32, c.top, 0, pix->height - c.height);
		c.left = clamp_t(s32, c.left, 0, pix->width - c.width);
		dcmi->crop = c;

		/* Adjust format resolution request to crop */
		pix->width = dcmi->crop.width;
		pix->height = dcmi->crop.height;
	}

	pix->field = V4L2_FIELD_NONE;
	pix->bytesperline = pix->width * sd_fmt->bpp;
	pix->sizeimage = pix->bytesperline * pix->height;

	if (sd_format)
		*sd_format = sd_fmt;
	if (sd_framesize)
		*sd_framesize = sd_fsize;

	return 0;
}
862 
/*
 * Apply a capture format: negotiate via dcmi_try_fmt(), program the
 * sensor's ACTIVE pad format, then store the resulting format,
 * dcmi_format and frame size in the device state.
 */
static int dcmi_set_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f)
{
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	const struct dcmi_format *sd_format;
	struct dcmi_framesize sd_framesize;
	struct v4l2_mbus_framefmt *mf = &format.format;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	int ret;

	/*
	 * Try format, fmt.width/height could have been changed
	 * to match sensor capability or crop request
	 * sd_format & sd_framesize will contain what subdev
	 * can do for this request.
	 */
	ret = dcmi_try_fmt(dcmi, f, &sd_format, &sd_framesize);
	if (ret)
		return ret;

	/* pix to mbus format */
	v4l2_fill_mbus_format(mf, pix,
			      sd_format->mbus_code);
	/* Use the sensor resolution, not the (possibly cropped) pix size */
	mf->width = sd_framesize.width;
	mf->height = sd_framesize.height;

	ret = v4l2_subdev_call(dcmi->entity.subdev, pad,
			       set_fmt, NULL, &format);
	if (ret < 0)
		return ret;

	dev_dbg(dcmi->dev, "Sensor format set to 0x%x %ux%u\n",
		mf->code, mf->width, mf->height);
	dev_dbg(dcmi->dev, "Buffer format set to %4.4s %ux%u\n",
		(char *)&pix->pixelformat,
		pix->width, pix->height);

	dcmi->fmt = *f;
	dcmi->sd_format = sd_format;
	dcmi->sd_framesize = sd_framesize;

	return 0;
}
907 
dcmi_s_fmt_vid_cap(struct file * file,void * priv,struct v4l2_format * f)908 static int dcmi_s_fmt_vid_cap(struct file *file, void *priv,
909 			      struct v4l2_format *f)
910 {
911 	struct stm32_dcmi *dcmi = video_drvdata(file);
912 
913 	if (vb2_is_streaming(&dcmi->queue))
914 		return -EBUSY;
915 
916 	return dcmi_set_fmt(dcmi, f);
917 }
918 
dcmi_try_fmt_vid_cap(struct file * file,void * priv,struct v4l2_format * f)919 static int dcmi_try_fmt_vid_cap(struct file *file, void *priv,
920 				struct v4l2_format *f)
921 {
922 	struct stm32_dcmi *dcmi = video_drvdata(file);
923 
924 	return dcmi_try_fmt(dcmi, f, NULL, NULL);
925 }
926 
dcmi_enum_fmt_vid_cap(struct file * file,void * priv,struct v4l2_fmtdesc * f)927 static int dcmi_enum_fmt_vid_cap(struct file *file, void  *priv,
928 				 struct v4l2_fmtdesc *f)
929 {
930 	struct stm32_dcmi *dcmi = video_drvdata(file);
931 
932 	if (f->index >= dcmi->num_of_sd_formats)
933 		return -EINVAL;
934 
935 	f->pixelformat = dcmi->sd_formats[f->index]->fourcc;
936 	return 0;
937 }
938 
dcmi_get_sensor_format(struct stm32_dcmi * dcmi,struct v4l2_pix_format * pix)939 static int dcmi_get_sensor_format(struct stm32_dcmi *dcmi,
940 				  struct v4l2_pix_format *pix)
941 {
942 	struct v4l2_subdev_format fmt = {
943 		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
944 	};
945 	int ret;
946 
947 	ret = v4l2_subdev_call(dcmi->entity.subdev, pad, get_fmt, NULL, &fmt);
948 	if (ret)
949 		return ret;
950 
951 	v4l2_fill_pix_format(pix, &fmt.format);
952 
953 	return 0;
954 }
955 
dcmi_set_sensor_format(struct stm32_dcmi * dcmi,struct v4l2_pix_format * pix)956 static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
957 				  struct v4l2_pix_format *pix)
958 {
959 	const struct dcmi_format *sd_fmt;
960 	struct v4l2_subdev_format format = {
961 		.which = V4L2_SUBDEV_FORMAT_TRY,
962 	};
963 	struct v4l2_subdev_pad_config pad_cfg;
964 	int ret;
965 
966 	sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
967 	if (!sd_fmt) {
968 		if (!dcmi->num_of_sd_formats)
969 			return -ENODATA;
970 
971 		sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
972 		pix->pixelformat = sd_fmt->fourcc;
973 	}
974 
975 	v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
976 	ret = v4l2_subdev_call(dcmi->entity.subdev, pad, set_fmt,
977 			       &pad_cfg, &format);
978 	if (ret < 0)
979 		return ret;
980 
981 	return 0;
982 }
983 
/*
 * Determine the sensor's maximum capture rectangle, trying in order:
 *  1) the sensor's crop-bounds selection,
 *  2) the largest of its enumerated frame sizes,
 *  3) its current active format.
 */
static int dcmi_get_sensor_bounds(struct stm32_dcmi *dcmi,
				  struct v4l2_rect *r)
{
	struct v4l2_subdev_selection bounds = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = V4L2_SEL_TGT_CROP_BOUNDS,
	};
	unsigned int max_width, max_height, max_pixsize;
	struct v4l2_pix_format pix;
	unsigned int i;
	int ret;

	/*
	 * Get sensor bounds first
	 */
	ret = v4l2_subdev_call(dcmi->entity.subdev, pad, get_selection,
			       NULL, &bounds);
	if (!ret)
		*r = bounds.r;
	/* Only fall through to the fallbacks when selection is unsupported */
	if (ret != -ENOIOCTLCMD)
		return ret;

	/*
	 * If selection is not implemented,
	 * fallback by enumerating sensor frame sizes
	 * and take the largest one
	 */
	max_width = 0;
	max_height = 0;
	max_pixsize = 0;
	for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
		struct dcmi_framesize *fsize = &dcmi->sd_framesizes[i];
		unsigned int pixsize = fsize->width * fsize->height;

		if (pixsize > max_pixsize) {
			max_pixsize = pixsize;
			max_width = fsize->width;
			max_height = fsize->height;
		}
	}
	if (max_pixsize > 0) {
		r->top = 0;
		r->left = 0;
		r->width = max_width;
		r->height = max_height;
		return 0;
	}

	/*
	 * If frame sizes enumeration is not implemented,
	 * fallback by getting current sensor frame size
	 */
	ret = dcmi_get_sensor_format(dcmi, &pix);
	if (ret)
		return ret;

	r->top = 0;
	r->left = 0;
	r->width = pix.width;
	r->height = pix.height;

	return 0;
}
1047 
dcmi_g_selection(struct file * file,void * fh,struct v4l2_selection * s)1048 static int dcmi_g_selection(struct file *file, void *fh,
1049 			    struct v4l2_selection *s)
1050 {
1051 	struct stm32_dcmi *dcmi = video_drvdata(file);
1052 
1053 	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1054 		return -EINVAL;
1055 
1056 	switch (s->target) {
1057 	case V4L2_SEL_TGT_CROP_DEFAULT:
1058 	case V4L2_SEL_TGT_CROP_BOUNDS:
1059 		s->r = dcmi->sd_bounds;
1060 		return 0;
1061 	case V4L2_SEL_TGT_CROP:
1062 		if (dcmi->do_crop) {
1063 			s->r = dcmi->crop;
1064 		} else {
1065 			s->r.top = 0;
1066 			s->r.left = 0;
1067 			s->r.width = dcmi->fmt.fmt.pix.width;
1068 			s->r.height = dcmi->fmt.fmt.pix.height;
1069 		}
1070 		break;
1071 	default:
1072 		return -EINVAL;
1073 	}
1074 
1075 	return 0;
1076 }
1077 
/*
 * VIDIOC_S_SELECTION handler: program a crop window.
 *
 * The sensor is first reset to its maximum (bounds) resolution, then the
 * requested rectangle is clamped inside that frame. Cropping is enabled
 * only if the resulting rectangle differs from the full sensor bounds.
 */
static int dcmi_s_selection(struct file *file, void *priv,
			    struct v4l2_selection *s)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);
	struct v4l2_rect r = s->r;
	struct v4l2_rect max_rect;
	struct v4l2_pix_format pix;

	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    s->target != V4L2_SEL_TGT_CROP)
		return -EINVAL;

	/* Reset sensor resolution to max resolution */
	/* NOTE(review): dcmi_set_sensor_format() return value is ignored;
	 * pix.width/height are assumed unchanged on success — confirm. */
	pix.pixelformat = dcmi->fmt.fmt.pix.pixelformat;
	pix.width = dcmi->sd_bounds.width;
	pix.height = dcmi->sd_bounds.height;
	dcmi_set_sensor_format(dcmi, &pix);

	/*
	 * Make the intersection between
	 * sensor resolution
	 * and crop request
	 */
	max_rect.top = 0;
	max_rect.left = 0;
	max_rect.width = pix.width;
	max_rect.height = pix.height;
	v4l2_rect_map_inside(&r, &max_rect);
	/* Keep the (possibly shrunken) rectangle fully inside the frame */
	r.top  = clamp_t(s32, r.top, 0, pix.height - r.height);
	r.left = clamp_t(s32, r.left, 0, pix.width - r.width);

	if (!((r.top == dcmi->sd_bounds.top) &&
	      (r.left == dcmi->sd_bounds.left) &&
	      (r.width == dcmi->sd_bounds.width) &&
	      (r.height == dcmi->sd_bounds.height))) {
		/* Crop if request is different than sensor resolution */
		dcmi->do_crop = true;
		dcmi->crop = r;
		dev_dbg(dcmi->dev, "s_selection: crop %ux%u@(%u,%u) from %ux%u\n",
			r.width, r.height, r.left, r.top,
			pix.width, pix.height);
	} else {
		/* Disable crop */
		dcmi->do_crop = false;
		dev_dbg(dcmi->dev, "s_selection: crop is disabled\n");
	}

	/* Report the rectangle actually applied back to user space */
	s->r = r;
	return 0;
}
1128 
dcmi_querycap(struct file * file,void * priv,struct v4l2_capability * cap)1129 static int dcmi_querycap(struct file *file, void *priv,
1130 			 struct v4l2_capability *cap)
1131 {
1132 	strlcpy(cap->driver, DRV_NAME, sizeof(cap->driver));
1133 	strlcpy(cap->card, "STM32 Camera Memory Interface",
1134 		sizeof(cap->card));
1135 	strlcpy(cap->bus_info, "platform:dcmi", sizeof(cap->bus_info));
1136 	return 0;
1137 }
1138 
dcmi_enum_input(struct file * file,void * priv,struct v4l2_input * i)1139 static int dcmi_enum_input(struct file *file, void *priv,
1140 			   struct v4l2_input *i)
1141 {
1142 	if (i->index != 0)
1143 		return -EINVAL;
1144 
1145 	i->type = V4L2_INPUT_TYPE_CAMERA;
1146 	strlcpy(i->name, "Camera", sizeof(i->name));
1147 	return 0;
1148 }
1149 
/* VIDIOC_G_INPUT handler: input 0 is always the current input */
static int dcmi_g_input(struct file *file, void *priv, unsigned int *i)
{
	*i = 0;
	return 0;
}
1155 
dcmi_s_input(struct file * file,void * priv,unsigned int i)1156 static int dcmi_s_input(struct file *file, void *priv, unsigned int i)
1157 {
1158 	if (i > 0)
1159 		return -EINVAL;
1160 	return 0;
1161 }
1162 
dcmi_enum_framesizes(struct file * file,void * fh,struct v4l2_frmsizeenum * fsize)1163 static int dcmi_enum_framesizes(struct file *file, void *fh,
1164 				struct v4l2_frmsizeenum *fsize)
1165 {
1166 	struct stm32_dcmi *dcmi = video_drvdata(file);
1167 	const struct dcmi_format *sd_fmt;
1168 	struct v4l2_subdev_frame_size_enum fse = {
1169 		.index = fsize->index,
1170 		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
1171 	};
1172 	int ret;
1173 
1174 	sd_fmt = find_format_by_fourcc(dcmi, fsize->pixel_format);
1175 	if (!sd_fmt)
1176 		return -EINVAL;
1177 
1178 	fse.code = sd_fmt->mbus_code;
1179 
1180 	ret = v4l2_subdev_call(dcmi->entity.subdev, pad, enum_frame_size,
1181 			       NULL, &fse);
1182 	if (ret)
1183 		return ret;
1184 
1185 	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1186 	fsize->discrete.width = fse.max_width;
1187 	fsize->discrete.height = fse.max_height;
1188 
1189 	return 0;
1190 }
1191 
dcmi_enum_frameintervals(struct file * file,void * fh,struct v4l2_frmivalenum * fival)1192 static int dcmi_enum_frameintervals(struct file *file, void *fh,
1193 				    struct v4l2_frmivalenum *fival)
1194 {
1195 	struct stm32_dcmi *dcmi = video_drvdata(file);
1196 	const struct dcmi_format *sd_fmt;
1197 	struct v4l2_subdev_frame_interval_enum fie = {
1198 		.index = fival->index,
1199 		.width = fival->width,
1200 		.height = fival->height,
1201 		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
1202 	};
1203 	int ret;
1204 
1205 	sd_fmt = find_format_by_fourcc(dcmi, fival->pixel_format);
1206 	if (!sd_fmt)
1207 		return -EINVAL;
1208 
1209 	fie.code = sd_fmt->mbus_code;
1210 
1211 	ret = v4l2_subdev_call(dcmi->entity.subdev, pad,
1212 			       enum_frame_interval, NULL, &fie);
1213 	if (ret)
1214 		return ret;
1215 
1216 	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1217 	fival->discrete = fie.interval;
1218 
1219 	return 0;
1220 }
1221 
/* Device-tree compatible strings handled by this driver */
static const struct of_device_id stm32_dcmi_of_match[] = {
	{ .compatible = "st,stm32-dcmi"},
	{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, stm32_dcmi_of_match);
1227 
/*
 * Video device open: on first open only, power up the sensor subdevice
 * and re-apply the current format so the pipeline is in a known state.
 * Errors unwind through the goto ladder (release file handle, unlock).
 */
static int dcmi_open(struct file *file)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);
	struct v4l2_subdev *sd = dcmi->entity.subdev;
	int ret;

	if (mutex_lock_interruptible(&dcmi->lock))
		return -ERESTARTSYS;

	ret = v4l2_fh_open(file);
	if (ret < 0)
		goto unlock;

	/* Power/format handling is done only by the first opener */
	if (!v4l2_fh_is_singular_file(file))
		goto fh_rel;

	/* -ENOIOCTLCMD (no s_power op in the subdev) is not a failure */
	ret = v4l2_subdev_call(sd, core, s_power, 1);
	if (ret < 0 && ret != -ENOIOCTLCMD)
		goto fh_rel;

	ret = dcmi_set_fmt(dcmi, &dcmi->fmt);
	if (ret)
		v4l2_subdev_call(sd, core, s_power, 0);
fh_rel:
	if (ret)
		v4l2_fh_release(file);
unlock:
	mutex_unlock(&dcmi->lock);
	return ret;
}
1258 
/*
 * Video device release: the singular-file test must be done BEFORE
 * _vb2_fop_release() drops the file handle; power the sensor down only
 * when the last user goes away.
 */
static int dcmi_release(struct file *file)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);
	struct v4l2_subdev *sd = dcmi->entity.subdev;
	bool fh_singular;
	int ret;

	mutex_lock(&dcmi->lock);

	fh_singular = v4l2_fh_is_singular_file(file);

	ret = _vb2_fop_release(file, NULL);

	if (fh_singular)
		v4l2_subdev_call(sd, core, s_power, 0);

	mutex_unlock(&dcmi->lock);

	return ret;
}
1279 
/* V4L2 ioctl dispatch table; buffer ioctls are delegated to vb2 helpers */
static const struct v4l2_ioctl_ops dcmi_ioctl_ops = {
	.vidioc_querycap		= dcmi_querycap,

	.vidioc_try_fmt_vid_cap		= dcmi_try_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap		= dcmi_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap		= dcmi_s_fmt_vid_cap,
	.vidioc_enum_fmt_vid_cap	= dcmi_enum_fmt_vid_cap,
	.vidioc_g_selection		= dcmi_g_selection,
	.vidioc_s_selection		= dcmi_s_selection,

	.vidioc_enum_input		= dcmi_enum_input,
	.vidioc_g_input			= dcmi_g_input,
	.vidioc_s_input			= dcmi_s_input,

	.vidioc_enum_framesizes		= dcmi_enum_framesizes,
	.vidioc_enum_frameintervals	= dcmi_enum_frameintervals,

	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,

	.vidioc_log_status		= v4l2_ctrl_log_status,
	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
};
1311 
/* File operations; poll/mmap/read are handled by vb2 helper wrappers */
static const struct v4l2_file_operations dcmi_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
	.open		= dcmi_open,
	.release	= dcmi_release,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = vb2_fop_get_unmapped_area,
#endif
	.read		= vb2_fop_read,
};
1324 
dcmi_set_default_fmt(struct stm32_dcmi * dcmi)1325 static int dcmi_set_default_fmt(struct stm32_dcmi *dcmi)
1326 {
1327 	struct v4l2_format f = {
1328 		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
1329 		.fmt.pix = {
1330 			.width		= CIF_WIDTH,
1331 			.height		= CIF_HEIGHT,
1332 			.field		= V4L2_FIELD_NONE,
1333 			.pixelformat	= dcmi->sd_formats[0]->fourcc,
1334 		},
1335 	};
1336 	int ret;
1337 
1338 	ret = dcmi_try_fmt(dcmi, &f, NULL, NULL);
1339 	if (ret)
1340 		return ret;
1341 	dcmi->sd_format = dcmi->sd_formats[0];
1342 	dcmi->fmt = f;
1343 	return 0;
1344 }
1345 
/*
 * Pixel formats the DCMI bridge supports, with their media bus codes.
 * bpp is bytes per pixel (all three are 16-bit formats).
 */
static const struct dcmi_format dcmi_formats[] = {
	{
		.fourcc = V4L2_PIX_FMT_RGB565,
		.mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
		.bpp = 2,
	}, {
		.fourcc = V4L2_PIX_FMT_YUYV,
		.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
		.bpp = 2,
	}, {
		.fourcc = V4L2_PIX_FMT_UYVY,
		.mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
		.bpp = 2,
	},
};
1361 
/*
 * Build the list of formats supported by BOTH the sensor and the bridge:
 * enumerate the sensor's media bus codes and keep each dcmi_formats[]
 * entry whose code matches, de-duplicated by fourcc. Returns -ENXIO if
 * no common format exists.
 */
static int dcmi_formats_init(struct stm32_dcmi *dcmi)
{
	const struct dcmi_format *sd_fmts[ARRAY_SIZE(dcmi_formats)];
	unsigned int num_fmts = 0, i, j;
	struct v4l2_subdev *subdev = dcmi->entity.subdev;
	struct v4l2_subdev_mbus_code_enum mbus_code = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};

	/* Walk every bus code the sensor reports */
	while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
				 NULL, &mbus_code)) {
		for (i = 0; i < ARRAY_SIZE(dcmi_formats); i++) {
			if (dcmi_formats[i].mbus_code != mbus_code.code)
				continue;

			/* Code supported, have we got this fourcc yet? */
			for (j = 0; j < num_fmts; j++)
				if (sd_fmts[j]->fourcc ==
						dcmi_formats[i].fourcc)
					/* Already available */
					break;
			if (j == num_fmts)
				/* New */
				sd_fmts[num_fmts++] = dcmi_formats + i;
		}
		mbus_code.index++;
	}

	if (!num_fmts)
		return -ENXIO;

	/* Persist the common set; devm_* memory is freed on device detach */
	dcmi->num_of_sd_formats = num_fmts;
	dcmi->sd_formats = devm_kcalloc(dcmi->dev,
					num_fmts, sizeof(struct dcmi_format *),
					GFP_KERNEL);
	if (!dcmi->sd_formats) {
		dev_err(dcmi->dev, "Could not allocate memory\n");
		return -ENOMEM;
	}

	memcpy(dcmi->sd_formats, sd_fmts,
	       num_fmts * sizeof(struct dcmi_format *));
	dcmi->sd_format = dcmi->sd_formats[0];

	return 0;
}
1408 
dcmi_framesizes_init(struct stm32_dcmi * dcmi)1409 static int dcmi_framesizes_init(struct stm32_dcmi *dcmi)
1410 {
1411 	unsigned int num_fsize = 0;
1412 	struct v4l2_subdev *subdev = dcmi->entity.subdev;
1413 	struct v4l2_subdev_frame_size_enum fse = {
1414 		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
1415 		.code = dcmi->sd_format->mbus_code,
1416 	};
1417 	unsigned int ret;
1418 	unsigned int i;
1419 
1420 	/* Allocate discrete framesizes array */
1421 	while (!v4l2_subdev_call(subdev, pad, enum_frame_size,
1422 				 NULL, &fse))
1423 		fse.index++;
1424 
1425 	num_fsize = fse.index;
1426 	if (!num_fsize)
1427 		return 0;
1428 
1429 	dcmi->num_of_sd_framesizes = num_fsize;
1430 	dcmi->sd_framesizes = devm_kcalloc(dcmi->dev, num_fsize,
1431 					   sizeof(struct dcmi_framesize),
1432 					   GFP_KERNEL);
1433 	if (!dcmi->sd_framesizes) {
1434 		dev_err(dcmi->dev, "Could not allocate memory\n");
1435 		return -ENOMEM;
1436 	}
1437 
1438 	/* Fill array with sensor supported framesizes */
1439 	dev_dbg(dcmi->dev, "Sensor supports %u frame sizes:\n", num_fsize);
1440 	for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
1441 		fse.index = i;
1442 		ret = v4l2_subdev_call(subdev, pad, enum_frame_size,
1443 				       NULL, &fse);
1444 		if (ret)
1445 			return ret;
1446 		dcmi->sd_framesizes[fse.index].width = fse.max_width;
1447 		dcmi->sd_framesizes[fse.index].height = fse.max_height;
1448 		dev_dbg(dcmi->dev, "%ux%u\n", fse.max_width, fse.max_height);
1449 	}
1450 
1451 	return 0;
1452 }
1453 
/*
 * Async notifier completion: all subdevices are bound. Negotiate formats
 * and frame sizes with the sensor, query its bounds, apply a default
 * format, and finally register the video device node.
 */
static int dcmi_graph_notify_complete(struct v4l2_async_notifier *notifier)
{
	struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
	int ret;

	/* Expose the sensor's controls through the bridge video node */
	dcmi->vdev->ctrl_handler = dcmi->entity.subdev->ctrl_handler;
	ret = dcmi_formats_init(dcmi);
	if (ret) {
		dev_err(dcmi->dev, "No supported mediabus format found\n");
		return ret;
	}

	ret = dcmi_framesizes_init(dcmi);
	if (ret) {
		dev_err(dcmi->dev, "Could not initialize framesizes\n");
		return ret;
	}

	ret = dcmi_get_sensor_bounds(dcmi, &dcmi->sd_bounds);
	if (ret) {
		dev_err(dcmi->dev, "Could not get sensor bounds\n");
		return ret;
	}

	ret = dcmi_set_default_fmt(dcmi);
	if (ret) {
		dev_err(dcmi->dev, "Could not set default format\n");
		return ret;
	}

	ret = video_register_device(dcmi->vdev, VFL_TYPE_GRABBER, -1);
	if (ret) {
		dev_err(dcmi->dev, "Failed to register video device\n");
		return ret;
	}

	dev_dbg(dcmi->dev, "Device registered as %s\n",
		video_device_node_name(dcmi->vdev));
	return 0;
}
1494 
/* Async notifier unbind: tear down the video node when the sensor goes */
static void dcmi_graph_notify_unbind(struct v4l2_async_notifier *notifier,
				     struct v4l2_subdev *sd,
				     struct v4l2_async_subdev *asd)
{
	struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);

	dev_dbg(dcmi->dev, "Removing %s\n", video_device_node_name(dcmi->vdev));

	/* Checks internaly if vdev has been init or not */
	video_unregister_device(dcmi->vdev);
}
1506 
/* Async notifier bound: remember the sensor subdevice for later calls */
static int dcmi_graph_notify_bound(struct v4l2_async_notifier *notifier,
				   struct v4l2_subdev *subdev,
				   struct v4l2_async_subdev *asd)
{
	struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);

	dev_dbg(dcmi->dev, "Subdev %s bound\n", subdev->name);

	dcmi->entity.subdev = subdev;

	return 0;
}
1519 
dcmi_graph_parse(struct stm32_dcmi * dcmi,struct device_node * node)1520 static int dcmi_graph_parse(struct stm32_dcmi *dcmi, struct device_node *node)
1521 {
1522 	struct device_node *ep = NULL;
1523 	struct device_node *remote;
1524 
1525 	while (1) {
1526 		ep = of_graph_get_next_endpoint(node, ep);
1527 		if (!ep)
1528 			return -EINVAL;
1529 
1530 		remote = of_graph_get_remote_port_parent(ep);
1531 		if (!remote) {
1532 			of_node_put(ep);
1533 			return -EINVAL;
1534 		}
1535 
1536 		/* Remote node to connect */
1537 		dcmi->entity.node = remote;
1538 		dcmi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
1539 		dcmi->entity.asd.match.fwnode.fwnode = of_fwnode_handle(remote);
1540 		return 0;
1541 	}
1542 }
1543 
/*
 * Parse the DT graph for the single sensor subdevice and register the
 * async notifier that will bind it. On failure after parsing, the
 * reference taken on entity.node is dropped.
 */
static int dcmi_graph_init(struct stm32_dcmi *dcmi)
{
	struct v4l2_async_subdev **subdevs = NULL;
	int ret;

	/* Parse the graph to extract a list of subdevice DT nodes. */
	ret = dcmi_graph_parse(dcmi, dcmi->dev->of_node);
	if (ret < 0) {
		dev_err(dcmi->dev, "Graph parsing failed\n");
		return ret;
	}

	/* Register the subdevices notifier. */
	subdevs = devm_kzalloc(dcmi->dev, sizeof(*subdevs), GFP_KERNEL);
	if (!subdevs) {
		of_node_put(dcmi->entity.node);
		return -ENOMEM;
	}

	/* A single async subdevice: the sensor found by graph parsing */
	subdevs[0] = &dcmi->entity.asd;

	dcmi->notifier.subdevs = subdevs;
	dcmi->notifier.num_subdevs = 1;
	dcmi->notifier.bound = dcmi_graph_notify_bound;
	dcmi->notifier.unbind = dcmi_graph_notify_unbind;
	dcmi->notifier.complete = dcmi_graph_notify_complete;

	ret = v4l2_async_notifier_register(&dcmi->v4l2_dev, &dcmi->notifier);
	if (ret < 0) {
		dev_err(dcmi->dev, "Notifier registration failed\n");
		of_node_put(dcmi->entity.node);
		return ret;
	}

	return 0;
}
1580 
/*
 * Probe: gather DT/platform resources (reset line, parallel bus endpoint,
 * IRQ, registers, clock, DMA channel), then set up the v4l2 device, video
 * node, vb2 queue and async graph, and finally pulse the reset line.
 * Errors unwind through the goto ladder at the bottom.
 */
static int dcmi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *match = NULL;
	/* NOTE(review): ep is passed uninitialized to
	 * v4l2_fwnode_endpoint_parse(); assumed the parser fills every
	 * field that is read below — confirm against the API version. */
	struct v4l2_fwnode_endpoint ep;
	struct stm32_dcmi *dcmi;
	struct vb2_queue *q;
	struct dma_chan *chan;
	struct clk *mclk;
	int irq;
	int ret = 0;

	match = of_match_device(of_match_ptr(stm32_dcmi_of_match), &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Could not find a match in devicetree\n");
		return -ENODEV;
	}

	dcmi = devm_kzalloc(&pdev->dev, sizeof(struct stm32_dcmi), GFP_KERNEL);
	if (!dcmi)
		return -ENOMEM;

	dcmi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(dcmi->rstc)) {
		dev_err(&pdev->dev, "Could not get reset control\n");
		return -ENODEV;
	}

	/* Get bus characteristics from devicetree */
	np = of_graph_get_next_endpoint(np, NULL);
	if (!np) {
		dev_err(&pdev->dev, "Could not find the endpoint\n");
		of_node_put(np);
		return -ENODEV;
	}

	ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &ep);
	if (ret) {
		dev_err(&pdev->dev, "Could not parse the endpoint\n");
		of_node_put(np);
		return -ENODEV;
	}

	/* DCMI is a parallel-bus interface only */
	if (ep.bus_type == V4L2_MBUS_CSI2) {
		dev_err(&pdev->dev, "CSI bus not supported\n");
		of_node_put(np);
		return -ENODEV;
	}
	dcmi->bus.flags = ep.bus.parallel.flags;
	dcmi->bus.bus_width = ep.bus.parallel.bus_width;
	dcmi->bus.data_shift = ep.bus.parallel.data_shift;

	of_node_put(np);

	/* NOTE(review): a negative return such as -EPROBE_DEFER from
	 * platform_get_irq() is masked to -ENODEV here — confirm. */
	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev, "Could not get irq\n");
		return -ENODEV;
	}

	dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!dcmi->res) {
		dev_err(&pdev->dev, "Could not get resource\n");
		return -ENODEV;
	}

	dcmi->regs = devm_ioremap_resource(&pdev->dev, dcmi->res);
	if (IS_ERR(dcmi->regs)) {
		dev_err(&pdev->dev, "Could not map registers\n");
		return PTR_ERR(dcmi->regs);
	}

	/* Threaded IRQ: hard handler plus bottom-half thread */
	ret = devm_request_threaded_irq(&pdev->dev, irq, dcmi_irq_callback,
					dcmi_irq_thread, IRQF_ONESHOT,
					dev_name(&pdev->dev), dcmi);
	if (ret) {
		dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
		return -ENODEV;
	}

	mclk = devm_clk_get(&pdev->dev, "mclk");
	if (IS_ERR(mclk)) {
		dev_err(&pdev->dev, "Unable to get mclk\n");
		return PTR_ERR(mclk);
	}

	/* DMA provider may not be up yet: defer rather than fail */
	chan = dma_request_slave_channel(&pdev->dev, "tx");
	if (!chan) {
		dev_info(&pdev->dev, "Unable to request DMA channel, defer probing\n");
		return -EPROBE_DEFER;
	}

	ret = clk_prepare(mclk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to prepare mclk %p\n", mclk);
		goto err_dma_release;
	}

	spin_lock_init(&dcmi->irqlock);
	mutex_init(&dcmi->lock);
	mutex_init(&dcmi->dma_lock);
	init_completion(&dcmi->complete);
	INIT_LIST_HEAD(&dcmi->buffers);

	dcmi->dev = &pdev->dev;
	dcmi->mclk = mclk;
	dcmi->state = STOPPED;
	dcmi->dma_chan = chan;

	q = &dcmi->queue;

	/* Initialize the top-level structure */
	ret = v4l2_device_register(&pdev->dev, &dcmi->v4l2_dev);
	if (ret)
		goto err_clk_unprepare;

	dcmi->vdev = video_device_alloc();
	if (!dcmi->vdev) {
		ret = -ENOMEM;
		goto err_device_unregister;
	}

	/* Video node */
	dcmi->vdev->fops = &dcmi_fops;
	dcmi->vdev->v4l2_dev = &dcmi->v4l2_dev;
	dcmi->vdev->queue = &dcmi->queue;
	strlcpy(dcmi->vdev->name, KBUILD_MODNAME, sizeof(dcmi->vdev->name));
	dcmi->vdev->release = video_device_release;
	dcmi->vdev->ioctl_ops = &dcmi_ioctl_ops;
	dcmi->vdev->lock = &dcmi->lock;
	dcmi->vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
				  V4L2_CAP_READWRITE;
	video_set_drvdata(dcmi->vdev, dcmi);

	/* Buffer queue */
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
	q->lock = &dcmi->lock;
	q->drv_priv = dcmi;
	q->buf_struct_size = sizeof(struct dcmi_buf);
	q->ops = &dcmi_video_qops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_buffers_needed = 2;
	q->dev = &pdev->dev;

	ret = vb2_queue_init(q);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to initialize vb2 queue\n");
		goto err_device_release;
	}

	ret = dcmi_graph_init(dcmi);
	if (ret < 0)
		goto err_device_release;

	/* Reset device */
	ret = reset_control_assert(dcmi->rstc);
	if (ret) {
		dev_err(&pdev->dev, "Failed to assert the reset line\n");
		goto err_device_release;
	}

	usleep_range(3000, 5000);

	ret = reset_control_deassert(dcmi->rstc);
	if (ret) {
		dev_err(&pdev->dev, "Failed to deassert the reset line\n");
		goto err_device_release;
	}

	dev_info(&pdev->dev, "Probe done\n");

	platform_set_drvdata(pdev, dcmi);
	return 0;

err_device_release:
	video_device_release(dcmi->vdev);
err_device_unregister:
	v4l2_device_unregister(&dcmi->v4l2_dev);
err_clk_unprepare:
	clk_unprepare(dcmi->mclk);
err_dma_release:
	dma_release_channel(dcmi->dma_chan);

	return ret;
}
1768 
/* Remove: unwind probe in reverse order (devm_* resources auto-release) */
static int dcmi_remove(struct platform_device *pdev)
{
	struct stm32_dcmi *dcmi = platform_get_drvdata(pdev);

	v4l2_async_notifier_unregister(&dcmi->notifier);
	v4l2_device_unregister(&dcmi->v4l2_dev);
	clk_unprepare(dcmi->mclk);
	dma_release_channel(dcmi->dma_chan);

	return 0;
}
1780 
/* Platform driver glue */
static struct platform_driver stm32_dcmi_driver = {
	.probe		= dcmi_probe,
	.remove		= dcmi_remove,
	.driver		= {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(stm32_dcmi_of_match),
	},
};

module_platform_driver(stm32_dcmi_driver);

MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
MODULE_AUTHOR("Hugues Fruchet <hugues.fruchet@st.com>");
MODULE_DESCRIPTION("STMicroelectronics STM32 Digital Camera Memory Interface driver");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("video");
1797