1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2017,2020 Intel Corporation
4  *
5  * Based partially on Intel IPU4 driver written by
6  *  Sakari Ailus <sakari.ailus@linux.intel.com>
7  *  Samu Onkalo <samu.onkalo@intel.com>
8  *  Jouni Högander <jouni.hogander@intel.com>
9  *  Jouni Ukkonen <jouni.ukkonen@intel.com>
10  *  Antti Laakso <antti.laakso@intel.com>
11  * et al.
12  */
13 
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/mm.h>
18 #include <linux/module.h>
19 #include <linux/pci.h>
20 #include <linux/pfn.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/property.h>
23 #include <linux/vmalloc.h>
24 #include <media/v4l2-ctrls.h>
25 #include <media/v4l2-device.h>
26 #include <media/v4l2-event.h>
27 #include <media/v4l2-fwnode.h>
28 #include <media/v4l2-ioctl.h>
29 #include <media/videobuf2-dma-sg.h>
30 
31 #include "ipu3-cio2.h"
32 
33 struct ipu3_cio2_fmt {
34 	u32 mbus_code;
35 	u32 fourcc;
36 	u8 mipicode;
37 	u8 bpp;
38 };
39 
40 /*
41  * These are the raw formats used by Intel's third-generation
42  * Image Processing Unit, known as IPU3:
43  * 10-bit raw Bayer, packed so that every 25 pixels occupy 32 bytes,
44  * with the last 6 bits left unused.
45  */
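/*
 * A worked example of the packing arithmetic above (illustration only):
 * 25 pixels * 10 bits = 250 bits of pixel data, padded up to 256 bits
 * (32 bytes) per block, which accounts for the 6 unused bits noted above.
 */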
46 static const struct ipu3_cio2_fmt formats[] = {
47 	{	/* put default entry at beginning */
48 		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
49 		.fourcc		= V4L2_PIX_FMT_IPU3_SGRBG10,
50 		.mipicode	= 0x2b,
51 		.bpp		= 10,
52 	}, {
53 		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
54 		.fourcc		= V4L2_PIX_FMT_IPU3_SGBRG10,
55 		.mipicode	= 0x2b,
56 		.bpp		= 10,
57 	}, {
58 		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
59 		.fourcc		= V4L2_PIX_FMT_IPU3_SBGGR10,
60 		.mipicode	= 0x2b,
61 		.bpp		= 10,
62 	}, {
63 		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
64 		.fourcc		= V4L2_PIX_FMT_IPU3_SRGGB10,
65 		.mipicode	= 0x2b,
66 		.bpp		= 10,
67 	},
68 };
69 
70 /*
71  * cio2_find_format - lookup color format by fourcc or/and media bus code
72  * @pixelformat: fourcc to match, ignored if null
73  * @mbus_code: media bus code to match, ignored if null
74  */
75 static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
76 						    const u32 *mbus_code)
77 {
78 	unsigned int i;
79 
80 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
81 		if (pixelformat && *pixelformat != formats[i].fourcc)
82 			continue;
83 		if (mbus_code && *mbus_code != formats[i].mbus_code)
84 			continue;
85 
86 		return &formats[i];
87 	}
88 
89 	return NULL;
90 }
91 
92 static inline u32 cio2_bytesperline(const unsigned int width)
93 {
94 	/*
95 	 * 64 bytes for every 50 pixels, the line length
96 	 * in bytes is multiple of 64 (line end alignment).
97 	 */
98 	return DIV_ROUND_UP(width, 50) * 64;
99 }
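/*
 * A worked example of the line-length calculation above, using the maximum
 * supported width of 4224 pixels (see cio2_v4l2_try_fmt() below):
 * DIV_ROUND_UP(4224, 50) * 64 = 85 * 64 = 5440 bytes per line, which is
 * 64-byte aligned and large enough for the packed 10-bit pixel data.
 */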
100 
101 /**************** FBPT operations ****************/
102 
103 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
104 {
105 	if (cio2->dummy_lop) {
106 		dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
107 				  cio2->dummy_lop, cio2->dummy_lop_bus_addr);
108 		cio2->dummy_lop = NULL;
109 	}
110 	if (cio2->dummy_page) {
111 		dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
112 				  cio2->dummy_page, cio2->dummy_page_bus_addr);
113 		cio2->dummy_page = NULL;
114 	}
115 }
116 
117 static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
118 {
119 	unsigned int i;
120 
121 	cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
122 					      &cio2->dummy_page_bus_addr,
123 					      GFP_KERNEL);
124 	cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
125 					     &cio2->dummy_lop_bus_addr,
126 					     GFP_KERNEL);
127 	if (!cio2->dummy_page || !cio2->dummy_lop) {
128 		cio2_fbpt_exit_dummy(cio2);
129 		return -ENOMEM;
130 	}
131 	/*
132 	 * A List of Pointers (LOP) holds 1024 32-bit pointers, one per 4 KiB
133 	 * page. Initialize each entry to the dummy_page bus base address.
134 	 */
135 	for (i = 0; i < CIO2_LOP_ENTRIES; i++)
136 		cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
137 
138 	return 0;
139 }
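/*
 * Note on sizing (derived from the comment above): each LOP page holds
 * 1024 32-bit entries and each entry maps one 4 KiB page, so a single
 * LOP page can describe 1024 * 4 KiB = 4 MiB of buffer memory.
 */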
140 
141 static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
142 				   struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
143 {
144 	/*
145 	 * The CPU first initializes the other FBPT fields and only then
146 	 * sets the VALID bit. This barrier ensures that the DMA (device)
147 	 * does not observe the VALID bit before those fields have been
148 	 * initialized; otherwise it could lead to havoc.
149 	 */
150 	dma_wmb();
151 
152 	/*
153 	 * Request interrupts for start and completion.
154 	 * The VALID bit applies only to the first entry.
155 	 */
156 	entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
157 		CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
158 }
159 
160 /* Initialize FBPT entries to point to the dummy frame */
161 static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
162 				       struct cio2_fbpt_entry
163 				       entry[CIO2_MAX_LOPS])
164 {
165 	unsigned int i;
166 
167 	entry[0].first_entry.first_page_offset = 0;
168 	entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
169 	entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
170 
171 	for (i = 0; i < CIO2_MAX_LOPS; i++)
172 		entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
173 
174 	cio2_fbpt_entry_enable(cio2, entry);
175 }
176 
177 /* Initialize FBPT entries to point to a given buffer */
178 static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
179 				     struct cio2_buffer *b,
180 				     struct cio2_fbpt_entry
181 				     entry[CIO2_MAX_LOPS])
182 {
183 	struct vb2_buffer *vb = &b->vbb.vb2_buf;
184 	unsigned int length = vb->planes[0].length;
185 	int remaining, i;
186 
187 	entry[0].first_entry.first_page_offset = b->offset;
188 	remaining = length + entry[0].first_entry.first_page_offset;
189 	entry[1].second_entry.num_of_pages = PFN_UP(remaining);
190 	/*
191 	 * last_page_available_bytes has the offset of the last byte in the
192 	 * last page which is still accessible by DMA. DMA cannot access
193 	 * beyond this point. Valid range for this is from 0 to 4095.
194 	 * A value of 0 means only the first byte in the page is DMA accessible.
195 	 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
196 	 * is available for DMA transfer.
197 	 */
198 	remaining = offset_in_page(remaining) ?: PAGE_SIZE;
199 	entry[1].second_entry.last_page_available_bytes = remaining - 1;
200 	/* Fill FBPT */
201 	remaining = length;
202 	i = 0;
203 	while (remaining > 0) {
204 		entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
205 		remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
206 		entry++;
207 		i++;
208 	}
209 
210 	/*
211 	 * The first FBPT entry past the buffer data must still point to a valid LOP.
212 	 */
213 	entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
214 
215 	cio2_fbpt_entry_enable(cio2, entry);
216 }
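/*
 * A worked example of the arithmetic above, assuming (for illustration only)
 * a full-size buffer of 4224x3136 at 5440 bytes per line with offset 0:
 * length = 5440 * 3136 = 17059840 bytes, so num_of_pages = PFN_UP(17059840)
 * = 4165 and, since the length is an exact multiple of PAGE_SIZE,
 * last_page_available_bytes = PAGE_SIZE - 1 = 4095 (whole last page usable).
 */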
217 
218 static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
219 {
220 	struct device *dev = &cio2->pci_dev->dev;
221 
222 	q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
223 				     GFP_KERNEL);
224 	if (!q->fbpt)
225 		return -ENOMEM;
226 
227 	return 0;
228 }
229 
230 static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
231 {
232 	dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
233 }
234 
235 /**************** CSI2 hardware setup ****************/
236 
237 /*
238  * The CSI2 receiver has several parameters affecting
239  * the receiver timings. These depend on the MIPI bus frequency
240  * F in Hz (sensor transmitter rate) as follows:
241  *     register value = (A/1e9 + B * UI) / COUNT_ACC
242  * where
243  *      UI = 1 / (2 * F) in seconds
244  *      COUNT_ACC = counter accuracy in seconds
245  *      For IPU3 COUNT_ACC = 0.0625
246  *
247  * A and B are coefficients from the table below,
248  * depending on whether the register minimum or maximum value is
249  * calculated.
250  *                                     Minimum     Maximum
251  * Clock lane                          A     B     A     B
252  * reg_rx_csi_dly_cnt_termen_clane     0     0    38     0
253  * reg_rx_csi_dly_cnt_settle_clane    95    -8   300   -16
254  * Data lanes
255  * reg_rx_csi_dly_cnt_termen_dlane0    0     0    35     4
256  * reg_rx_csi_dly_cnt_settle_dlane0   85    -2   145    -6
257  * reg_rx_csi_dly_cnt_termen_dlane1    0     0    35     4
258  * reg_rx_csi_dly_cnt_settle_dlane1   85    -2   145    -6
259  * reg_rx_csi_dly_cnt_termen_dlane2    0     0    35     4
260  * reg_rx_csi_dly_cnt_settle_dlane2   85    -2   145    -6
261  * reg_rx_csi_dly_cnt_termen_dlane3    0     0    35     4
262  * reg_rx_csi_dly_cnt_settle_dlane3   85    -2   145    -6
263  *
264  * We use the minimum values of both A and B.
265  */
266 
267 /*
268  * shift for keeping value range suitable for 32-bit integer arithmetic
269  */
270 #define LIMIT_SHIFT	8
271 
272 static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
273 {
274 	const u32 accinv = 16; /* invert of counter resolution */
275 	const u32 uiinv = 500000000; /* 1e9 / 2 */
276 	s32 r;
277 
278 	freq >>= LIMIT_SHIFT;
279 
280 	if (WARN_ON(freq <= 0 || freq > S32_MAX))
281 		return def;
282 	/*
283 	 * b could be 0, -2 or -8, so |accinv * b| is always
284 	 * less than (1 << LIMIT_SHIFT) and thus |r| < 500000000.
285 	 */
286 	r = accinv * b * (uiinv >> LIMIT_SHIFT);
287 	r = r / (s32)freq;
288 	/* max value of a is 95 */
289 	r += accinv * a;
290 
291 	return r;
292 };
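/*
 * A worked example of the computation above, assuming a 400 MHz link
 * frequency chosen purely for illustration: for the clock lane settle
 * minimum (a = 95, b = -8),
 * r = 16 * (-8) * (500000000 >> 8) / (400000000 >> 8) + 16 * 95
 *   = -160 + 1520 = 1360.
 */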
293 
294 /* Calculate the HS RX termination-enable and settle delays for clock and data lanes */
295 static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
296 				 struct cio2_csi2_timing *timing,
297 				 unsigned int bpp, unsigned int lanes)
298 {
299 	struct device *dev = &cio2->pci_dev->dev;
300 	s64 freq;
301 
302 	if (!q->sensor)
303 		return -ENODEV;
304 
305 	freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
306 	if (freq < 0) {
307 		dev_err(dev, "error %lld, invalid link_freq\n", freq);
308 		return freq;
309 	}
310 
311 	timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
312 					    CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
313 					    freq,
314 					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
315 	timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
316 					    CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
317 					    freq,
318 					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
319 	timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
320 					    CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
321 					    freq,
322 					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
323 	timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
324 					    CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
325 					    freq,
326 					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
327 
328 	dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
329 	dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
330 	dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
331 	dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
332 
333 	return 0;
334 };
335 
336 static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
337 {
338 	static const int NUM_VCS = 4;
339 	static const int SID;	/* Stream id */
340 	static const int ENTRY;
341 	static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
342 					CIO2_FBPT_SUBENTRY_UNIT);
343 	const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
344 	const struct ipu3_cio2_fmt *fmt;
345 	void __iomem *const base = cio2->base;
346 	u8 lanes, csi2bus = q->csi2.port;
347 	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
348 	struct cio2_csi2_timing timing = { 0 };
349 	int i, r;
350 
351 	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
352 	if (!fmt)
353 		return -EINVAL;
354 
355 	lanes = q->csi2.lanes;
356 
357 	r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
358 	if (r)
359 		return r;
360 
361 	writel(timing.clk_termen, q->csi_rx_base +
362 		CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
363 	writel(timing.clk_settle, q->csi_rx_base +
364 		CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
365 
366 	for (i = 0; i < lanes; i++) {
367 		writel(timing.dat_termen, q->csi_rx_base +
368 			CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
369 		writel(timing.dat_settle, q->csi_rx_base +
370 			CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
371 	}
372 
373 	writel(CIO2_PBM_WMCTRL1_MIN_2CK |
374 	       CIO2_PBM_WMCTRL1_MID1_2CK |
375 	       CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
376 	writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
377 	       CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
378 	       CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
379 	       CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
380 	       CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
381 	       CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
382 	writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
383 	       CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
384 	       CIO2_PBM_ARB_CTRL_LE_EN |
385 	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
386 	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
387 	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
388 	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
389 	       base + CIO2_REG_PBM_ARB_CTRL);
390 	writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
391 	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
392 	writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
393 	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
394 
395 	writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
396 	writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
397 
398 	/* Configure MIPI backend */
399 	for (i = 0; i < NUM_VCS; i++)
400 		writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
401 
402 	/* There are 16 short packet LUT entries */
403 	for (i = 0; i < 16; i++)
404 		writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
405 		       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
406 	writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
407 	       q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
408 
409 	writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
410 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
411 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
412 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
413 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
414 	writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
415 
416 	writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
417 	       CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
418 	       base + CIO2_REG_INT_EN);
419 
420 	writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
421 	       << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
422 	       base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
423 	writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
424 	       sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
425 	       fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
426 	       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
427 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
428 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
429 	writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
430 
431 	writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
432 	writel(CIO2_CGC_PRIM_TGE |
433 	       CIO2_CGC_SIDE_TGE |
434 	       CIO2_CGC_XOSC_TGE |
435 	       CIO2_CGC_D3I3_TGE |
436 	       CIO2_CGC_CSI2_INTERFRAME_TGE |
437 	       CIO2_CGC_CSI2_PORT_DCGE |
438 	       CIO2_CGC_SIDE_DCGE |
439 	       CIO2_CGC_PRIM_DCGE |
440 	       CIO2_CGC_ROSC_DCGE |
441 	       CIO2_CGC_XOSC_DCGE |
442 	       CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
443 	       CIO2_CGC_CSI_CLKGATE_HOLDOFF
444 	       << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
445 	writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
446 	writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
447 	       CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
448 	       CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
449 	       CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
450 	       base + CIO2_REG_LTRVAL01);
451 	writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
452 	       CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
453 	       CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
454 	       CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
455 	       base + CIO2_REG_LTRVAL23);
456 
457 	for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
458 		writel(0, base + CIO2_REG_CDMABA(i));
459 		writel(0, base + CIO2_REG_CDMAC0(i));
460 		writel(0, base + CIO2_REG_CDMAC1(i));
461 	}
462 
463 	/* Enable DMA */
464 	writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
465 
466 	writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
467 	       FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
468 	       CIO2_CDMAC0_DMA_INTR_ON_FE |
469 	       CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
470 	       CIO2_CDMAC0_DMA_EN |
471 	       CIO2_CDMAC0_DMA_INTR_ON_FS |
472 	       CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
473 
474 	writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
475 	       base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
476 
477 	writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
478 
479 	writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
480 	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
481 	       CIO2_PXM_FRF_CFG_MSK_ECC_RE |
482 	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
483 	       base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
484 
485 	/* Clear interrupts */
486 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
487 	writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
488 	writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
489 	writel(~0, base + CIO2_REG_INT_STS);
490 
491 	/* Enable devices, starting from the last device in the pipe */
492 	writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
493 	writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
494 
495 	return 0;
496 }
497 
498 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
499 {
500 	void __iomem *const base = cio2->base;
501 	unsigned int i;
502 	u32 value;
503 	int ret;
504 
505 	/* Disable CSI receiver and MIPI backend devices */
506 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
507 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
508 	writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
509 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
510 
511 	/* Halt DMA */
512 	writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
513 	ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
514 				 value, value & CIO2_CDMAC0_DMA_HALTED,
515 				 4000, 2000000);
516 	if (ret)
517 		dev_err(&cio2->pci_dev->dev,
518 			"DMA %i cannot be halted\n", CIO2_DMA_CHAN);
519 
520 	for (i = 0; i < CIO2_NUM_PORTS; i++) {
521 		writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
522 		       CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
523 		writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
524 		       CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
525 	}
526 }
527 
528 static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
529 {
530 	struct device *dev = &cio2->pci_dev->dev;
531 	struct cio2_queue *q = cio2->cur_queue;
532 	struct cio2_fbpt_entry *entry;
533 	u64 ns = ktime_get_ns();
534 
535 	if (dma_chan >= CIO2_QUEUES) {
536 		dev_err(dev, "bad DMA channel %i\n", dma_chan);
537 		return;
538 	}
539 
540 	entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
541 	if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
542 		dev_warn(&cio2->pci_dev->dev,
543 			 "no ready buffers found on DMA channel %u\n",
544 			 dma_chan);
545 		return;
546 	}
547 
548 	/* Find out which buffer(s) are ready */
549 	do {
550 		struct cio2_buffer *b;
551 
552 		b = q->bufs[q->bufs_first];
553 		if (b) {
554 			unsigned int received = entry[1].second_entry.num_of_bytes;
555 			unsigned long payload =
556 				vb2_get_plane_payload(&b->vbb.vb2_buf, 0);
557 
558 			q->bufs[q->bufs_first] = NULL;
559 			atomic_dec(&q->bufs_queued);
560 			dev_dbg(&cio2->pci_dev->dev,
561 				"buffer %i done\n", b->vbb.vb2_buf.index);
562 
563 			b->vbb.vb2_buf.timestamp = ns;
564 			b->vbb.field = V4L2_FIELD_NONE;
565 			b->vbb.sequence = atomic_read(&q->frame_sequence);
566 			if (payload != received)
567 				dev_warn(dev,
568 					 "payload length is %lu, received %u\n",
569 					 payload, received);
570 			vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
571 		}
572 		atomic_inc(&q->frame_sequence);
573 		cio2_fbpt_entry_init_dummy(cio2, entry);
574 		q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
575 		entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
576 	} while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
577 }
578 
579 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
580 {
581 	/*
582 	 * For the user space camera control algorithms it is essential
583 	 * to know when the reception of a frame has begun. That's often
584 	 * the best timing information to get from the hardware.
585 	 */
586 	struct v4l2_event event = {
587 		.type = V4L2_EVENT_FRAME_SYNC,
588 		.u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
589 	};
590 
591 	v4l2_event_queue(q->subdev.devnode, &event);
592 }
593 
594 static const char *const cio2_irq_errs[] = {
595 	"single packet header error corrected",
596 	"multiple packet header errors detected",
597 	"payload checksum (CRC) error",
598 	"fifo overflow",
599 	"reserved short packet data type detected",
600 	"reserved long packet data type detected",
601 	"incomplete long packet detected",
602 	"frame sync error",
603 	"line sync error",
604 	"DPHY start of transmission error",
605 	"DPHY synchronization error",
606 	"escape mode error",
607 	"escape mode trigger event",
608 	"escape mode ultra-low power state for data lane(s)",
609 	"escape mode ultra-low power state exit for clock lane",
610 	"inter-frame short packet discarded",
611 	"inter-frame long packet discarded",
612 	"non-matching Long Packet stalled",
613 };
614 
615 static const char *const cio2_port_errs[] = {
616 	"ECC recoverable",
617 	"DPHY not recoverable",
618 	"ECC not recoverable",
619 	"CRC error",
620 	"INTERFRAMEDATA",
621 	"PKT2SHORT",
622 	"PKT2LONG",
623 };
624 
625 static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
626 {
627 	void __iomem *const base = cio2->base;
628 	struct device *dev = &cio2->pci_dev->dev;
629 
630 	if (int_status & CIO2_INT_IOOE) {
631 		/*
632 		 * Interrupt on Output Error:
633 		 * 1) SRAM is full and FS received, or
634 		 * 2) An invalid bit detected by DMA.
635 		 */
636 		u32 oe_status, oe_clear;
637 
638 		oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
639 		oe_status = oe_clear;
640 
641 		if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
642 			dev_err(dev, "DMA output error: 0x%x\n",
643 				(oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
644 				>> CIO2_INT_EXT_OE_DMAOE_SHIFT);
645 			oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
646 		}
647 		if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
648 			dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
649 				(oe_status & CIO2_INT_EXT_OE_OES_MASK)
650 				>> CIO2_INT_EXT_OE_OES_SHIFT);
651 			oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
652 		}
653 		writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
654 		if (oe_status)
655 			dev_warn(dev, "unknown interrupt 0x%x on OE\n",
656 				 oe_status);
657 		int_status &= ~CIO2_INT_IOOE;
658 	}
659 
660 	if (int_status & CIO2_INT_IOC_MASK) {
661 		/* DMA IO done -- frame ready */
662 		u32 clr = 0;
663 		unsigned int d;
664 
665 		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
666 			if (int_status & CIO2_INT_IOC(d)) {
667 				clr |= CIO2_INT_IOC(d);
668 				cio2_buffer_done(cio2, d);
669 			}
670 		int_status &= ~clr;
671 	}
672 
673 	if (int_status & CIO2_INT_IOS_IOLN_MASK) {
674 		/* DMA IO starts or reached specified line */
675 		u32 clr = 0;
676 		unsigned int d;
677 
678 		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
679 			if (int_status & CIO2_INT_IOS_IOLN(d)) {
680 				clr |= CIO2_INT_IOS_IOLN(d);
681 				if (d == CIO2_DMA_CHAN)
682 					cio2_queue_event_sof(cio2,
683 							     cio2->cur_queue);
684 			}
685 		int_status &= ~clr;
686 	}
687 
688 	if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
689 		/* CSI2 receiver (error) interrupt */
690 		u32 ie_status, ie_clear;
691 		unsigned int port;
692 
693 		ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
694 		ie_status = ie_clear;
695 
696 		for (port = 0; port < CIO2_NUM_PORTS; port++) {
697 			u32 port_status = (ie_status >> (port * 8)) & 0xff;
698 			u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
699 			void __iomem *const csi_rx_base =
700 						base + CIO2_REG_PIPE_BASE(port);
701 			unsigned int i;
702 
703 			while (port_status & err_mask) {
704 				i = ffs(port_status) - 1;
705 				dev_err(dev, "port %i error %s\n",
706 					port, cio2_port_errs[i]);
707 				ie_status &= ~BIT(port * 8 + i);
708 				port_status &= ~BIT(i);
709 			}
710 
711 			if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
712 				u32 csi2_status, csi2_clear;
713 
714 				csi2_status = readl(csi_rx_base +
715 						CIO2_REG_IRQCTRL_STATUS);
716 				csi2_clear = csi2_status;
717 				err_mask =
718 					BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;
719 
720 				while (csi2_status & err_mask) {
721 					i = ffs(csi2_status) - 1;
722 					dev_err(dev,
723 						"CSI-2 receiver port %i: %s\n",
724 							port, cio2_irq_errs[i]);
725 					csi2_status &= ~BIT(i);
726 				}
727 
728 				writel(csi2_clear,
729 				       csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
730 				if (csi2_status)
731 					dev_warn(dev,
732 						 "unknown CSI2 error 0x%x on port %i\n",
733 						 csi2_status, port);
734 
735 				ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
736 			}
737 		}
738 
739 		writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
740 		if (ie_status)
741 			dev_warn(dev, "unknown interrupt 0x%x on IE\n",
742 				 ie_status);
743 
744 		int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
745 	}
746 
747 	if (int_status)
748 		dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
749 }
750 
751 static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
752 {
753 	struct cio2_device *cio2 = cio2_ptr;
754 	void __iomem *const base = cio2->base;
755 	struct device *dev = &cio2->pci_dev->dev;
756 	u32 int_status;
757 
758 	int_status = readl(base + CIO2_REG_INT_STS);
759 	dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
760 	if (!int_status)
761 		return IRQ_NONE;
762 
763 	do {
764 		writel(int_status, base + CIO2_REG_INT_STS);
765 		cio2_irq_handle_once(cio2, int_status);
766 		int_status = readl(base + CIO2_REG_INT_STS);
767 		if (int_status)
768 			dev_dbg(dev, "pending status 0x%x\n", int_status);
769 	} while (int_status);
770 
771 	return IRQ_HANDLED;
772 }
773 
774 /**************** Videobuf2 interface ****************/
775 
776 static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
777 					enum vb2_buffer_state state)
778 {
779 	unsigned int i;
780 
781 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
782 		if (q->bufs[i]) {
783 			atomic_dec(&q->bufs_queued);
784 			vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
785 					state);
786 			q->bufs[i] = NULL;
787 		}
788 	}
789 }
790 
791 static int cio2_vb2_queue_setup(struct vb2_queue *vq,
792 				unsigned int *num_buffers,
793 				unsigned int *num_planes,
794 				unsigned int sizes[],
795 				struct device *alloc_devs[])
796 {
797 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
798 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
799 	unsigned int i;
800 
801 	*num_planes = q->format.num_planes;
802 
803 	for (i = 0; i < *num_planes; ++i) {
804 		sizes[i] = q->format.plane_fmt[i].sizeimage;
805 		alloc_devs[i] = &cio2->pci_dev->dev;
806 	}
807 
808 	*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
809 
810 	/* Initialize buffer queue */
811 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
812 		q->bufs[i] = NULL;
813 		cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
814 	}
815 	atomic_set(&q->bufs_queued, 0);
816 	q->bufs_first = 0;
817 	q->bufs_next = 0;
818 
819 	return 0;
820 }
821 
822 /* Called after each buffer is allocated */
823 static int cio2_vb2_buf_init(struct vb2_buffer *vb)
824 {
825 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
826 	struct device *dev = &cio2->pci_dev->dev;
827 	struct cio2_buffer *b =
828 		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
829 	unsigned int pages = PFN_UP(vb->planes[0].length);
830 	unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
831 	struct sg_table *sg;
832 	struct sg_dma_page_iter sg_iter;
833 	unsigned int i, j;
834 
835 	if (lops <= 0 || lops > CIO2_MAX_LOPS) {
836 		dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
837 			vb->planes[0].length);
838 		return -ENOSPC;		/* Should never happen */
839 	}
840 
841 	memset(b->lop, 0, sizeof(b->lop));
842 	/* Allocate LOP table */
843 	for (i = 0; i < lops; i++) {
844 		b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
845 					       &b->lop_bus_addr[i], GFP_KERNEL);
846 		if (!b->lop[i])
847 			goto fail;
848 	}
849 
850 	/* Fill LOP */
851 	sg = vb2_dma_sg_plane_desc(vb, 0);
852 	if (!sg)
853 		goto fail;	/* free the LOP pages allocated above */
854 
855 	if (sg->nents && sg->sgl)
856 		b->offset = sg->sgl->offset;
857 
858 	i = j = 0;
859 	for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
860 		if (!pages--)
861 			break;
862 		b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
863 		j++;
864 		if (j == CIO2_LOP_ENTRIES) {
865 			i++;
866 			j = 0;
867 		}
868 	}
869 
870 	b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
871 	return 0;
872 fail:
873 	while (i--)
874 		dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
875 	return -ENOMEM;
876 }
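/*
 * A worked example of the LOP sizing above, assuming (for illustration only)
 * a full-size buffer of 17059840 bytes (4224x3136 at 5440 bytes per line):
 * pages = PFN_UP(17059840) = 4165, and with one extra entry reserved for the
 * trailing dummy page, lops = DIV_ROUND_UP(4166, CIO2_LOP_ENTRIES) = 5 when
 * CIO2_LOP_ENTRIES is 1024 as described in cio2_fbpt_init_dummy().
 */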
877 
878 /* Transfer buffer ownership to cio2 */
879 static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
880 {
881 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
882 	struct cio2_queue *q =
883 		container_of(vb->vb2_queue, struct cio2_queue, vbq);
884 	struct cio2_buffer *b =
885 		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
886 	struct cio2_fbpt_entry *entry;
887 	unsigned long flags;
888 	unsigned int i, j, next = q->bufs_next;
889 	int bufs_queued = atomic_inc_return(&q->bufs_queued);
890 	u32 fbpt_rp;
891 
892 	dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index);
893 
894 	/*
895 	 * This code queues the buffer to the CIO2 DMA engine, which starts
896 	 * running once streaming has started. It is possible that this code
897 	 * gets preempted due to increased CPU load. If that happens, the
898 	 * driver does not get an opportunity to queue new buffers to the
899 	 * CIO2 DMA engine. When the DMA engine encounters an FBPT entry
900 	 * without the VALID bit set, it halts, and continuing the stream
901 	 * then requires restarting both the DMA engine and the sensor.
902 	 * This is undesirable but also highly unlikely, since the DMA
903 	 * engine would have to process 32 FBPT entries before running into
904 	 * an entry without the VALID bit set. We mitigate the risk further
905 	 * by disabling interrupts for the duration of this queueing.
906 	 */
907 	local_irq_save(flags);
908 
909 	fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
910 		   >> CIO2_CDMARI_FBPT_RP_SHIFT)
911 		   & CIO2_CDMARI_FBPT_RP_MASK;
912 
913 	/*
914 	 * fbpt_rp is the FBPT entry that the DMA is currently working
915 	 * on, but since it could move on to the next entry at any time,
916 	 * assume that we might already be there.
917 	 */
918 	fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
919 
920 	if (bufs_queued <= 1 || fbpt_rp == next)
921 		/* Buffers were drained */
922 		next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
923 
924 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
925 		/*
926 		 * We have allocated CIO2_MAX_BUFFERS circularly for the
927 		 * hardware, while the user has requested a queue of N
928 		 * buffers. The driver ensures N <= CIO2_MAX_BUFFERS, so
929 		 * whenever the user queues a buffer there is always a free entry.
930 		 */
931 		if (!q->bufs[next]) {
932 			q->bufs[next] = b;
933 			entry = &q->fbpt[next * CIO2_MAX_LOPS];
934 			cio2_fbpt_entry_init_buf(cio2, b, entry);
935 			local_irq_restore(flags);
936 			q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
937 			for (j = 0; j < vb->num_planes; j++)
938 				vb2_set_plane_payload(vb, j,
939 					q->format.plane_fmt[j].sizeimage);
940 			return;
941 		}
942 
943 		dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next);
944 		next = (next + 1) % CIO2_MAX_BUFFERS;
945 	}
946 
947 	local_irq_restore(flags);
948 	dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n");
949 	atomic_dec(&q->bufs_queued);
950 	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
951 }
952 
953 /* Called when each buffer is freed */
954 static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
955 {
956 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
957 	struct cio2_buffer *b =
958 		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
959 	unsigned int i;
960 
961 	/* Free LOP table */
962 	for (i = 0; i < CIO2_MAX_LOPS; i++) {
963 		if (b->lop[i])
964 			dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
965 					  b->lop[i], b->lop_bus_addr[i]);
966 	}
967 }
968 
969 static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
970 {
971 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
972 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
973 	int r;
974 
975 	cio2->cur_queue = q;
976 	atomic_set(&q->frame_sequence, 0);
977 
978 	r = pm_runtime_resume_and_get(&cio2->pci_dev->dev);
979 	if (r < 0) {
980 		dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
981 		return r;
982 	}
983 
984 	r = media_pipeline_start(&q->vdev.entity, &q->pipe);
985 	if (r)
986 		goto fail_pipeline;
987 
988 	r = cio2_hw_init(cio2, q);
989 	if (r)
990 		goto fail_hw;
991 
992 	/* Start streaming on sensor */
993 	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
994 	if (r)
995 		goto fail_csi2_subdev;
996 
997 	cio2->streaming = true;
998 
999 	return 0;
1000 
1001 fail_csi2_subdev:
1002 	cio2_hw_exit(cio2, q);
1003 fail_hw:
1004 	media_pipeline_stop(&q->vdev.entity);
1005 fail_pipeline:
1006 	dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r);
1007 	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
1008 	pm_runtime_put(&cio2->pci_dev->dev);
1009 
1010 	return r;
1011 }
1012 
1013 static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
1014 {
1015 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
1016 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
1017 
1018 	if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
1019 		dev_err(&cio2->pci_dev->dev,
1020 			"failed to stop sensor streaming\n");
1021 
1022 	cio2_hw_exit(cio2, q);
1023 	synchronize_irq(cio2->pci_dev->irq);
1024 	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
1025 	media_pipeline_stop(&q->vdev.entity);
1026 	pm_runtime_put(&cio2->pci_dev->dev);
1027 	cio2->streaming = false;
1028 }
1029 
1030 static const struct vb2_ops cio2_vb2_ops = {
1031 	.buf_init = cio2_vb2_buf_init,
1032 	.buf_queue = cio2_vb2_buf_queue,
1033 	.buf_cleanup = cio2_vb2_buf_cleanup,
1034 	.queue_setup = cio2_vb2_queue_setup,
1035 	.start_streaming = cio2_vb2_start_streaming,
1036 	.stop_streaming = cio2_vb2_stop_streaming,
1037 	.wait_prepare = vb2_ops_wait_prepare,
1038 	.wait_finish = vb2_ops_wait_finish,
1039 };
1040 
1041 /**************** V4L2 interface ****************/
1042 
1043 static int cio2_v4l2_querycap(struct file *file, void *fh,
1044 			      struct v4l2_capability *cap)
1045 {
1046 	struct cio2_device *cio2 = video_drvdata(file);
1047 
1048 	strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
1049 	strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
1050 	snprintf(cap->bus_info, sizeof(cap->bus_info),
1051 		 "PCI:%s", pci_name(cio2->pci_dev));
1052 
1053 	return 0;
1054 }
1055 
1056 static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
1057 			      struct v4l2_fmtdesc *f)
1058 {
1059 	if (f->index >= ARRAY_SIZE(formats))
1060 		return -EINVAL;
1061 
1062 	f->pixelformat = formats[f->index].fourcc;
1063 
1064 	return 0;
1065 }
1066 
1067 /* The format is validated in cio2_video_link_validate() */
1068 static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
1069 {
1070 	struct cio2_queue *q = file_to_cio2_queue(file);
1071 
1072 	f->fmt.pix_mp = q->format;
1073 
1074 	return 0;
1075 }
1076 
1077 static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
1078 {
1079 	const struct ipu3_cio2_fmt *fmt;
1080 	struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
1081 
1082 	fmt = cio2_find_format(&mpix->pixelformat, NULL);
1083 	if (!fmt)
1084 		fmt = &formats[0];
1085 
1086 	/* Only supports up to 4224x3136 */
1087 	if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
1088 		mpix->width = CIO2_IMAGE_MAX_WIDTH;
1089 	if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
1090 		mpix->height = CIO2_IMAGE_MAX_HEIGHT;
1091 
1092 	mpix->num_planes = 1;
1093 	mpix->pixelformat = fmt->fourcc;
1094 	mpix->colorspace = V4L2_COLORSPACE_RAW;
1095 	mpix->field = V4L2_FIELD_NONE;
1096 	mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
1097 	mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
1098 							mpix->height;
1099 
1100 	/* use default */
1101 	mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1102 	mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
1103 	mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
1104 
1105 	return 0;
1106 }
1107 
1108 static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
1109 {
1110 	struct cio2_queue *q = file_to_cio2_queue(file);
1111 
1112 	cio2_v4l2_try_fmt(file, fh, f);
1113 	q->format = f->fmt.pix_mp;
1114 
1115 	return 0;
1116 }
1117 
1118 static int
1119 cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1120 {
1121 	if (input->index > 0)
1122 		return -EINVAL;
1123 
1124 	strscpy(input->name, "camera", sizeof(input->name));
1125 	input->type = V4L2_INPUT_TYPE_CAMERA;
1126 
1127 	return 0;
1128 }
1129 
1130 static int
1131 cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
1132 {
1133 	*input = 0;
1134 
1135 	return 0;
1136 }
1137 
1138 static int
1139 cio2_video_s_input(struct file *file, void *fh, unsigned int input)
1140 {
1141 	return input == 0 ? 0 : -EINVAL;
1142 }
1143 
1144 static const struct v4l2_file_operations cio2_v4l2_fops = {
1145 	.owner = THIS_MODULE,
1146 	.unlocked_ioctl = video_ioctl2,
1147 	.open = v4l2_fh_open,
1148 	.release = vb2_fop_release,
1149 	.poll = vb2_fop_poll,
1150 	.mmap = vb2_fop_mmap,
1151 };
1152 
1153 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
1154 	.vidioc_querycap = cio2_v4l2_querycap,
1155 	.vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
1156 	.vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
1157 	.vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
1158 	.vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
1159 	.vidioc_reqbufs = vb2_ioctl_reqbufs,
1160 	.vidioc_create_bufs = vb2_ioctl_create_bufs,
1161 	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1162 	.vidioc_querybuf = vb2_ioctl_querybuf,
1163 	.vidioc_qbuf = vb2_ioctl_qbuf,
1164 	.vidioc_dqbuf = vb2_ioctl_dqbuf,
1165 	.vidioc_streamon = vb2_ioctl_streamon,
1166 	.vidioc_streamoff = vb2_ioctl_streamoff,
1167 	.vidioc_expbuf = vb2_ioctl_expbuf,
1168 	.vidioc_enum_input = cio2_video_enum_input,
1169 	.vidioc_g_input	= cio2_video_g_input,
1170 	.vidioc_s_input	= cio2_video_s_input,
1171 };
1172 
1173 static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
1174 				       struct v4l2_fh *fh,
1175 				       struct v4l2_event_subscription *sub)
1176 {
1177 	if (sub->type != V4L2_EVENT_FRAME_SYNC)
1178 		return -EINVAL;
1179 
1180 	/* Line number. For now only zero accepted. */
1181 	if (sub->id != 0)
1182 		return -EINVAL;
1183 
1184 	return v4l2_event_subscribe(fh, sub, 0, NULL);
1185 }
1186 
1187 static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1188 {
1189 	struct v4l2_mbus_framefmt *format;
1190 	const struct v4l2_mbus_framefmt fmt_default = {
1191 		.width = 1936,
1192 		.height = 1096,
1193 		.code = formats[0].mbus_code,
1194 		.field = V4L2_FIELD_NONE,
1195 		.colorspace = V4L2_COLORSPACE_RAW,
1196 		.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
1197 		.quantization = V4L2_QUANTIZATION_DEFAULT,
1198 		.xfer_func = V4L2_XFER_FUNC_DEFAULT,
1199 	};
1200 
1201 	/* Initialize try_fmt */
1202 	format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SINK);
1203 	*format = fmt_default;
1204 
1205 	/* same as sink */
1206 	format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SOURCE);
1207 	*format = fmt_default;
1208 
1209 	return 0;
1210 }
1211 
1212 /*
1213  * cio2_subdev_get_fmt - Handle get format by pads subdev method
1214  * @sd: pointer to v4l2 subdev structure
1215  * @sd_state: V4L2 subdev state
1216  * @fmt: pointer to v4l2 subdev format structure
1217  * return -EINVAL or zero on success
1218  */
1219 static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
1220 			       struct v4l2_subdev_state *sd_state,
1221 			       struct v4l2_subdev_format *fmt)
1222 {
1223 	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1224 
1225 	mutex_lock(&q->subdev_lock);
1226 
1227 	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1228 		fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
1229 							  fmt->pad);
1230 	else
1231 		fmt->format = q->subdev_fmt;
1232 
1233 	mutex_unlock(&q->subdev_lock);
1234 
1235 	return 0;
1236 }
1237 
1238 /*
1239  * cio2_subdev_set_fmt - Handle set format by pads subdev method
1240  * @sd: pointer to v4l2 subdev structure
1241  * @sd_state: V4L2 subdev state
1242  * @fmt: pointer to v4l2 subdev format structure
1243  * return -EINVAL or zero on success
1244  */
1245 static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
1246 			       struct v4l2_subdev_state *sd_state,
1247 			       struct v4l2_subdev_format *fmt)
1248 {
1249 	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1250 	struct v4l2_mbus_framefmt *mbus;
1251 	u32 mbus_code = fmt->format.code;
1252 	unsigned int i;
1253 
1254 	/*
1255 	 * Only allow setting sink pad format;
1256 	 * source always propagates from sink
1257 	 */
1258 	if (fmt->pad == CIO2_PAD_SOURCE)
1259 		return cio2_subdev_get_fmt(sd, sd_state, fmt);
1260 
1261 	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1262 		mbus = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
1263 	else
1264 		mbus = &q->subdev_fmt;
1265 
1266 	fmt->format.code = formats[0].mbus_code;
1267 
1268 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
1269 		if (formats[i].mbus_code == mbus_code) {
1270 			fmt->format.code = mbus_code;
1271 			break;
1272 		}
1273 	}
1274 
1275 	fmt->format.width = min(fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
1276 	fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
1277 	fmt->format.field = V4L2_FIELD_NONE;
1278 
1279 	mutex_lock(&q->subdev_lock);
1280 	*mbus = fmt->format;
1281 	mutex_unlock(&q->subdev_lock);
1282 
1283 	return 0;
1284 }
1285 
1286 static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
1287 				      struct v4l2_subdev_state *sd_state,
1288 				      struct v4l2_subdev_mbus_code_enum *code)
1289 {
1290 	if (code->index >= ARRAY_SIZE(formats))
1291 		return -EINVAL;
1292 
1293 	code->code = formats[code->index].mbus_code;
1294 	return 0;
1295 }
1296 
1297 static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
1298 						struct v4l2_subdev_format *fmt)
1299 {
1300 	if (is_media_entity_v4l2_subdev(pad->entity)) {
1301 		struct v4l2_subdev *sd =
1302 			media_entity_to_v4l2_subdev(pad->entity);
1303 
1304 		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1305 		fmt->pad = pad->index;
1306 		return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
1307 	}
1308 
1309 	return -EINVAL;
1310 }
1311 
1312 static int cio2_video_link_validate(struct media_link *link)
1313 {
1314 	struct video_device *vd = container_of(link->sink->entity,
1315 						struct video_device, entity);
1316 	struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
1317 	struct cio2_device *cio2 = video_get_drvdata(vd);
1318 	struct v4l2_subdev_format source_fmt;
1319 	int ret;
1320 
1321 	if (!media_entity_remote_pad(link->sink->entity->pads)) {
1322 		dev_info(&cio2->pci_dev->dev,
1323 			 "video node %s pad not connected\n", vd->name);
1324 		return -ENOTCONN;
1325 	}
1326 
1327 	ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
1328 	if (ret < 0)
1329 		return 0;
1330 
1331 	if (source_fmt.format.width != q->format.width ||
1332 	    source_fmt.format.height != q->format.height) {
1333 		dev_err(&cio2->pci_dev->dev,
1334 			"Wrong width or height %ux%u (%ux%u expected)\n",
1335 			q->format.width, q->format.height,
1336 			source_fmt.format.width, source_fmt.format.height);
1337 		return -EINVAL;
1338 	}
1339 
1340 	if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
1341 		return -EINVAL;
1342 
1343 	return 0;
1344 }
1345 
1346 static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
1347 	.subscribe_event = cio2_subdev_subscribe_event,
1348 	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
1349 };
1350 
1351 static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
1352 	.open = cio2_subdev_open,
1353 };
1354 
1355 static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
1356 	.link_validate = v4l2_subdev_link_validate_default,
1357 	.get_fmt = cio2_subdev_get_fmt,
1358 	.set_fmt = cio2_subdev_set_fmt,
1359 	.enum_mbus_code = cio2_subdev_enum_mbus_code,
1360 };
1361 
1362 static const struct v4l2_subdev_ops cio2_subdev_ops = {
1363 	.core = &cio2_subdev_core_ops,
1364 	.pad = &cio2_subdev_pad_ops,
1365 };
1366 
1367 /******* V4L2 sub-device asynchronous registration callbacks ***********/
1368 
1369 struct sensor_async_subdev {
1370 	struct v4l2_async_subdev asd;
1371 	struct csi2_bus_info csi2;
1372 };
1373 
1374 /* The .bound() notifier callback when a match is found */
1375 static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
1376 			       struct v4l2_subdev *sd,
1377 			       struct v4l2_async_subdev *asd)
1378 {
1379 	struct cio2_device *cio2 = container_of(notifier,
1380 					struct cio2_device, notifier);
1381 	struct sensor_async_subdev *s_asd = container_of(asd,
1382 					struct sensor_async_subdev, asd);
1383 	struct cio2_queue *q;
1384 
1385 	if (cio2->queue[s_asd->csi2.port].sensor)
1386 		return -EBUSY;
1387 
1388 	q = &cio2->queue[s_asd->csi2.port];
1389 
1390 	q->csi2 = s_asd->csi2;
1391 	q->sensor = sd;
1392 	q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
1393 
1394 	return 0;
1395 }
1396 
1397 /* The .unbind callback */
1398 static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
1399 				 struct v4l2_subdev *sd,
1400 				 struct v4l2_async_subdev *asd)
1401 {
1402 	struct cio2_device *cio2 = container_of(notifier,
1403 						struct cio2_device, notifier);
1404 	struct sensor_async_subdev *s_asd = container_of(asd,
1405 					struct sensor_async_subdev, asd);
1406 
1407 	cio2->queue[s_asd->csi2.port].sensor = NULL;
1408 }
1409 
1410 /* .complete() is called after all subdevices have been located */
1411 static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
1412 {
1413 	struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
1414 						notifier);
1415 	struct sensor_async_subdev *s_asd;
1416 	struct v4l2_async_subdev *asd;
1417 	struct cio2_queue *q;
1418 	unsigned int pad;
1419 	int ret;
1420 
1421 	list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
1422 		s_asd = container_of(asd, struct sensor_async_subdev, asd);
1423 		q = &cio2->queue[s_asd->csi2.port];
1424 
1425 		for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
1426 			if (q->sensor->entity.pads[pad].flags &
1427 						MEDIA_PAD_FL_SOURCE)
1428 				break;
1429 
1430 		if (pad == q->sensor->entity.num_pads) {
1431 			dev_err(&cio2->pci_dev->dev,
1432 				"failed to find src pad for %s\n",
1433 				q->sensor->name);
1434 			return -ENXIO;
1435 		}
1436 
1437 		ret = media_create_pad_link(
1438 				&q->sensor->entity, pad,
1439 				&q->subdev.entity, CIO2_PAD_SINK,
1440 				0);
1441 		if (ret) {
1442 			dev_err(&cio2->pci_dev->dev,
1443 				"failed to create link for %s\n",
1444 				q->sensor->name);
1445 			return ret;
1446 		}
1447 	}
1448 
1449 	return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
1450 }
1451 
1452 static const struct v4l2_async_notifier_operations cio2_async_ops = {
1453 	.bound = cio2_notifier_bound,
1454 	.unbind = cio2_notifier_unbind,
1455 	.complete = cio2_notifier_complete,
1456 };
1457 
1458 static int cio2_parse_firmware(struct cio2_device *cio2)
1459 {
1460 	unsigned int i;
1461 	int ret;
1462 
1463 	for (i = 0; i < CIO2_NUM_PORTS; i++) {
1464 		struct v4l2_fwnode_endpoint vep = {
1465 			.bus_type = V4L2_MBUS_CSI2_DPHY
1466 		};
1467 		struct sensor_async_subdev *s_asd;
1468 		struct fwnode_handle *ep;
1469 
1470 		ep = fwnode_graph_get_endpoint_by_id(
1471 			dev_fwnode(&cio2->pci_dev->dev), i, 0,
1472 			FWNODE_GRAPH_ENDPOINT_NEXT);
1473 
1474 		if (!ep)
1475 			continue;
1476 
1477 		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
1478 		if (ret)
1479 			goto err_parse;
1480 
1481 		s_asd = v4l2_async_notifier_add_fwnode_remote_subdev(
1482 				&cio2->notifier, ep, struct sensor_async_subdev);
1483 		if (IS_ERR(s_asd)) {
1484 			ret = PTR_ERR(s_asd);
1485 			goto err_parse;
1486 		}
1487 
1488 		s_asd->csi2.port = vep.base.port;
1489 		s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
1490 
1491 		fwnode_handle_put(ep);
1492 
1493 		continue;
1494 
1495 err_parse:
1496 		fwnode_handle_put(ep);
1497 		return ret;
1498 	}
1499 
1500 	/*
1501 	 * Proceed even without sensors connected to allow the device to
1502 	 * suspend.
1503 	 */
1504 	cio2->notifier.ops = &cio2_async_ops;
1505 	ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
1506 	if (ret)
1507 		dev_err(&cio2->pci_dev->dev,
1508 			"failed to register async notifier: %d\n", ret);
1509 
1510 	return ret;
1511 }
1512 
1513 /**************** Queue initialization ****************/
1514 static const struct media_entity_operations cio2_media_ops = {
1515 	.link_validate = v4l2_subdev_link_validate,
1516 };
1517 
1518 static const struct media_entity_operations cio2_video_entity_ops = {
1519 	.link_validate = cio2_video_link_validate,
1520 };
1521 
1522 static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
1523 {
1524 	static const u32 default_width = 1936;
1525 	static const u32 default_height = 1096;
1526 	const struct ipu3_cio2_fmt dflt_fmt = formats[0];
1527 
1528 	struct video_device *vdev = &q->vdev;
1529 	struct vb2_queue *vbq = &q->vbq;
1530 	struct v4l2_subdev *subdev = &q->subdev;
1531 	struct v4l2_mbus_framefmt *fmt;
1532 	int r;
1533 
1534 	/* Initialize miscellaneous variables */
1535 	mutex_init(&q->lock);
1536 	mutex_init(&q->subdev_lock);
1537 
1538 	/* Initialize formats to default values */
1539 	fmt = &q->subdev_fmt;
1540 	fmt->width = default_width;
1541 	fmt->height = default_height;
1542 	fmt->code = dflt_fmt.mbus_code;
1543 	fmt->field = V4L2_FIELD_NONE;
1544 
1545 	q->format.width = default_width;
1546 	q->format.height = default_height;
1547 	q->format.pixelformat = dflt_fmt.fourcc;
1548 	q->format.colorspace = V4L2_COLORSPACE_RAW;
1549 	q->format.field = V4L2_FIELD_NONE;
1550 	q->format.num_planes = 1;
1551 	q->format.plane_fmt[0].bytesperline =
1552 				cio2_bytesperline(q->format.width);
1553 	q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
1554 						q->format.height;
1555 
1556 	/* Initialize fbpt */
1557 	r = cio2_fbpt_init(cio2, q);
1558 	if (r)
1559 		goto fail_fbpt;
1560 
1561 	/* Initialize media entities */
1562 	q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
1563 		MEDIA_PAD_FL_MUST_CONNECT;
1564 	q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
1565 	subdev->entity.ops = &cio2_media_ops;
1566 	subdev->internal_ops = &cio2_subdev_internal_ops;
1567 	r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
1568 	if (r) {
1569 		dev_err(&cio2->pci_dev->dev,
1570 			"failed to initialize subdev media entity (%d)\n", r);
1571 		goto fail_subdev_media_entity;
1572 	}
1573 
1574 	q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
1575 	vdev->entity.ops = &cio2_video_entity_ops;
1576 	r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
1577 	if (r) {
1578 		dev_err(&cio2->pci_dev->dev,
1579 			"failed to initialize videodev media entity (%d)\n", r);
1580 		goto fail_vdev_media_entity;
1581 	}
1582 
1583 	/* Initialize subdev */
1584 	v4l2_subdev_init(subdev, &cio2_subdev_ops);
1585 	subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
1586 	subdev->owner = THIS_MODULE;
1587 	snprintf(subdev->name, sizeof(subdev->name),
1588 		 CIO2_ENTITY_NAME " %td", q - cio2->queue);
1589 	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
1590 	v4l2_set_subdevdata(subdev, cio2);
1591 	r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
1592 	if (r) {
1593 		dev_err(&cio2->pci_dev->dev,
1594 			"failed to initialize subdev (%d)\n", r);
1595 		goto fail_subdev;
1596 	}
1597 
1598 	/* Initialize vbq */
1599 	vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1600 	vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
1601 	vbq->ops = &cio2_vb2_ops;
1602 	vbq->mem_ops = &vb2_dma_sg_memops;
1603 	vbq->buf_struct_size = sizeof(struct cio2_buffer);
1604 	vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1605 	vbq->min_buffers_needed = 1;
1606 	vbq->drv_priv = cio2;
1607 	vbq->lock = &q->lock;
1608 	r = vb2_queue_init(vbq);
1609 	if (r) {
1610 		dev_err(&cio2->pci_dev->dev,
1611 			"failed to initialize videobuf2 queue (%d)\n", r);
1612 		goto fail_subdev;
1613 	}
1614 
1615 	/* Initialize vdev */
1616 	snprintf(vdev->name, sizeof(vdev->name),
1617 		 "%s %td", CIO2_NAME, q - cio2->queue);
1618 	vdev->release = video_device_release_empty;
1619 	vdev->fops = &cio2_v4l2_fops;
1620 	vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
1621 	vdev->lock = &cio2->lock;
1622 	vdev->v4l2_dev = &cio2->v4l2_dev;
1623 	vdev->queue = &q->vbq;
1624 	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
1625 	video_set_drvdata(vdev, cio2);
1626 	r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1627 	if (r) {
1628 		dev_err(&cio2->pci_dev->dev,
1629 			"failed to register video device (%d)\n", r);
1630 		goto fail_vdev;
1631 	}
1632 
1633 	/* Create link from CIO2 subdev to output node */
1634 	r = media_create_pad_link(
1635 		&subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
1636 		MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1637 	if (r)
1638 		goto fail_link;
1639 
1640 	return 0;
1641 
1642 fail_link:
1643 	vb2_video_unregister_device(&q->vdev);
1644 fail_vdev:
1645 	v4l2_device_unregister_subdev(subdev);
1646 fail_subdev:
1647 	media_entity_cleanup(&vdev->entity);
1648 fail_vdev_media_entity:
1649 	media_entity_cleanup(&subdev->entity);
1650 fail_subdev_media_entity:
1651 	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1652 fail_fbpt:
1653 	mutex_destroy(&q->subdev_lock);
1654 	mutex_destroy(&q->lock);
1655 
1656 	return r;
1657 }
1658 
1659 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
1660 {
1661 	vb2_video_unregister_device(&q->vdev);
1662 	media_entity_cleanup(&q->vdev.entity);
1663 	v4l2_device_unregister_subdev(&q->subdev);
1664 	media_entity_cleanup(&q->subdev.entity);
1665 	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1666 	mutex_destroy(&q->subdev_lock);
1667 	mutex_destroy(&q->lock);
1668 }
1669 
1670 static int cio2_queues_init(struct cio2_device *cio2)
1671 {
1672 	int i, r;
1673 
1674 	for (i = 0; i < CIO2_QUEUES; i++) {
1675 		r = cio2_queue_init(cio2, &cio2->queue[i]);
1676 		if (r)
1677 			break;
1678 	}
1679 
1680 	if (i == CIO2_QUEUES)
1681 		return 0;
1682 
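	/* Unwind the queues that were initialized before the failure. */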
1683 	for (i--; i >= 0; i--)
1684 		cio2_queue_exit(cio2, &cio2->queue[i]);
1685 
1686 	return r;
1687 }
1688 
1689 static void cio2_queues_exit(struct cio2_device *cio2)
1690 {
1691 	unsigned int i;
1692 
1693 	for (i = 0; i < CIO2_QUEUES; i++)
1694 		cio2_queue_exit(cio2, &cio2->queue[i]);
1695 }
1696 
1697 static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
1698 {
1699 	struct fwnode_handle *endpoint;
1700 
1701 	if (IS_ERR_OR_NULL(fwnode))
1702 		return -EINVAL;
1703 
1704 	endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
1705 	if (endpoint) {
1706 		fwnode_handle_put(endpoint);
1707 		return 0;
1708 	}
1709 
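	/*
	 * No endpoints on this fwnode; fall back to its secondary fwnode
	 * (e.g. software nodes built from SSDB by cio2-bridge).
	 */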
1710 	return cio2_check_fwnode_graph(fwnode->secondary);
1711 }
1712 
1713 /**************** PCI interface ****************/
1714 
1715 static int cio2_pci_probe(struct pci_dev *pci_dev,
1716 			  const struct pci_device_id *id)
1717 {
1718 	struct fwnode_handle *fwnode = dev_fwnode(&pci_dev->dev);
1719 	struct cio2_device *cio2;
1720 	int r;
1721 
1722 	cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
1723 	if (!cio2)
1724 		return -ENOMEM;
1725 	cio2->pci_dev = pci_dev;
1726 
1727 	/*
1728 	 * On some platforms no connections to sensors are defined in firmware.
1729 	 * If the device has no endpoints then we can try to build them as
1730 	 * software_nodes parsed from SSDB.
1731 	 */
1732 	r = cio2_check_fwnode_graph(fwnode);
1733 	if (r) {
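		/*
		 * A secondary fwnode is already in place but the graph still
		 * has no endpoints, so there is nothing to connect to.
		 */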
1734 		if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
1735 			dev_err(&pci_dev->dev, "fwnode graph has no endpoints connected\n");
1736 			return -EINVAL;
1737 		}
1738 
1739 		r = cio2_bridge_init(pci_dev);
1740 		if (r)
1741 			return r;
1742 	}
1743 
1744 	r = pcim_enable_device(pci_dev);
1745 	if (r) {
1746 		dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
1747 		return r;
1748 	}
1749 
1750 	dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
1751 		 pci_dev->device, pci_dev->revision);
1752 
1753 	r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
1754 	if (r) {
1755 		dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
1756 		return -ENODEV;
1757 	}
1758 
1759 	cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
1760 
1761 	pci_set_drvdata(pci_dev, cio2);
1762 
1763 	pci_set_master(pci_dev);
1764 
1765 	r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK);
1766 	if (r) {
1767 		dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
1768 		return -ENODEV;
1769 	}
1770 
1771 	r = pci_enable_msi(pci_dev);
1772 	if (r) {
1773 		dev_err(&pci_dev->dev, "failed to enable MSI (%d)\n", r);
1774 		return r;
1775 	}
1776 
1777 	r = cio2_fbpt_init_dummy(cio2);
1778 	if (r)
1779 		return r;
1780 
1781 	mutex_init(&cio2->lock);
1782 
1783 	cio2->media_dev.dev = &cio2->pci_dev->dev;
1784 	strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
1785 		sizeof(cio2->media_dev.model));
1786 	snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
1787 		 "PCI:%s", pci_name(cio2->pci_dev));
1788 	cio2->media_dev.hw_revision = 0;
1789 
1790 	media_device_init(&cio2->media_dev);
1791 	r = media_device_register(&cio2->media_dev);
1792 	if (r < 0)
1793 		goto fail_mutex_destroy;
1794 
1795 	cio2->v4l2_dev.mdev = &cio2->media_dev;
1796 	r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev);
1797 	if (r) {
1798 		dev_err(&pci_dev->dev,
1799 			"failed to register V4L2 device (%d)\n", r);
1800 		goto fail_media_device_unregister;
1801 	}
1802 
1803 	r = cio2_queues_init(cio2);
1804 	if (r)
1805 		goto fail_v4l2_device_unregister;
1806 
1807 	v4l2_async_notifier_init(&cio2->notifier);
1808 
1809 	/* Register notifier for the subdevices we care about */
1810 	r = cio2_parse_firmware(cio2);
1811 	if (r)
1812 		goto fail_clean_notifier;
1813 
1814 	r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
1815 			     IRQF_SHARED, CIO2_NAME, cio2);
1816 	if (r) {
1817 		dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
1818 		goto fail_clean_notifier;
1819 	}
1820 
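	/*
	 * Drop the runtime PM usage count held over probe and lift the
	 * default block on runtime PM so the device may runtime suspend.
	 */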
1821 	pm_runtime_put_noidle(&pci_dev->dev);
1822 	pm_runtime_allow(&pci_dev->dev);
1823 
1824 	return 0;
1825 
1826 fail_clean_notifier:
1827 	v4l2_async_notifier_unregister(&cio2->notifier);
1828 	v4l2_async_notifier_cleanup(&cio2->notifier);
1829 	cio2_queues_exit(cio2);
1830 fail_v4l2_device_unregister:
1831 	v4l2_device_unregister(&cio2->v4l2_dev);
1832 fail_media_device_unregister:
1833 	media_device_unregister(&cio2->media_dev);
1834 	media_device_cleanup(&cio2->media_dev);
1835 fail_mutex_destroy:
1836 	mutex_destroy(&cio2->lock);
1837 	cio2_fbpt_exit_dummy(cio2);
1838 
1839 	return r;
1840 }
1841 
1842 static void cio2_pci_remove(struct pci_dev *pci_dev)
1843 {
1844 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1845 
1846 	media_device_unregister(&cio2->media_dev);
1847 	v4l2_async_notifier_unregister(&cio2->notifier);
1848 	v4l2_async_notifier_cleanup(&cio2->notifier);
1849 	cio2_queues_exit(cio2);
1850 	cio2_fbpt_exit_dummy(cio2);
1851 	v4l2_device_unregister(&cio2->v4l2_dev);
1852 	media_device_cleanup(&cio2->media_dev);
1853 	mutex_destroy(&cio2->lock);
1854 
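	/* Undo the runtime PM setup done at the end of probe. */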
1855 	pm_runtime_forbid(&pci_dev->dev);
1856 	pm_runtime_get_noresume(&pci_dev->dev);
1857 }
1858 
1859 static int __maybe_unused cio2_runtime_suspend(struct device *dev)
1860 {
1861 	struct pci_dev *pci_dev = to_pci_dev(dev);
1862 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1863 	void __iomem *const base = cio2->base;
1864 	u16 pm;
1865 
1866 	writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
1867 	dev_dbg(dev, "cio2 runtime suspend.\n");
1868 
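	/* Clear the PMCSR power state field and request D3. */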
1869 	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1870 	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1871 	pm |= CIO2_PMCSR_D3;
1872 	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1873 
1874 	return 0;
1875 }
1876 
1877 static int __maybe_unused cio2_runtime_resume(struct device *dev)
1878 {
1879 	struct pci_dev *pci_dev = to_pci_dev(dev);
1880 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1881 	void __iomem *const base = cio2->base;
1882 	u16 pm;
1883 
1884 	writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
1885 	dev_dbg(dev, "cio2 runtime resume.\n");
1886 
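	/* Clear the PMCSR power state field, i.e. put the device back in D0. */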
1887 	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1888 	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1889 	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1890 
1891 	return 0;
1892 }
1893 
1894 /*
1895  * Helper function to advance all the elements of a circular buffer by "start"
1896  * positions
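 * e.g. with elems = 5 and start = 2, {A,B,C,D,E} becomes {C,D,E,A,B}.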
1897  */
1898 static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
1899 {
1900 	struct {
1901 		size_t begin, end;
1902 	} arr[2] = {
1903 		{ 0, start - 1 },
1904 		{ start, elems - 1 },
1905 	};
1906 
1907 #define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
1908 
1909 	/* Loop as long as we have out-of-place entries */
1910 	while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
1911 		size_t size0, i;
1912 
1913 		/*
1914 		 * Find the number of entries that can be arranged on this
1915 		 * iteration.
1916 		 */
1917 		size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
1918 
1919 		/* Swap the entries in two parts of the array. */
1920 		for (i = 0; i < size0; i++) {
1921 			u8 *d = ptr + elem_size * (arr[1].begin + i);
1922 			u8 *s = ptr + elem_size * (arr[0].begin + i);
1923 			size_t j;
1924 
1925 			for (j = 0; j < elem_size; j++)
1926 				swap(d[j], s[j]);
1927 		}
1928 
1929 		if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
1930 			/* The end of the first array remains unarranged. */
1931 			arr[0].begin += size0;
1932 		} else {
1933 			/*
1934 			 * The first array is fully arranged so we proceed
1935 			 * handling the next one.
1936 			 */
1937 			arr[0].begin = arr[1].begin;
1938 			arr[0].end = arr[1].begin + size0 - 1;
1939 			arr[1].begin += size0;
1940 		}
1941 	}
1942 }
1943 
1944 static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
1945 {
1946 	unsigned int i, j;
1947 
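	/* Find the first entry, starting at bufs_first, that still has a buffer. */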
1948 	for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
1949 		i++, j = (j + 1) % CIO2_MAX_BUFFERS)
1950 		if (q->bufs[j])
1951 			break;
1952 
1953 	if (i == CIO2_MAX_BUFFERS)
1954 		return;
1955 
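	/*
	 * Rotate both the FBPT (CIO2_MAX_LOPS entries per buffer) and the
	 * bufs[] array so that buffer j becomes the first one.
	 */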
1956 	if (j) {
1957 		arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
1958 			CIO2_MAX_BUFFERS, j);
1959 		arrange(q->bufs, sizeof(struct cio2_buffer *),
1960 			CIO2_MAX_BUFFERS, j);
1961 	}
1962 
1963 	/*
1964 	 * DMA clears the valid bit when accessing the buffer.
1965 	 * When stopping stream in suspend callback, some of the buffers
1966 	 * may be in invalid state. After resume, when DMA meets the invalid
1967 	 * buffer, it will halt and stop receiving new data.
1968 	 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
1969 	 */
1970 	for (i = 0; i < CIO2_MAX_BUFFERS; i++)
1971 		cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
1972 }
1973 
1974 static int __maybe_unused cio2_suspend(struct device *dev)
1975 {
1976 	struct pci_dev *pci_dev = to_pci_dev(dev);
1977 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1978 	struct cio2_queue *q = cio2->cur_queue;
1979 
1980 	dev_dbg(dev, "cio2 suspend\n");
1981 	if (!cio2->streaming)
1982 		return 0;
1983 
1984 	/* Stop stream */
1985 	cio2_hw_exit(cio2, q);
1986 	synchronize_irq(pci_dev->irq);
1987 
1988 	pm_runtime_force_suspend(dev);
1989 
1990 	/*
1991 	 * Upon resume, hw starts to process the fbpt entries from beginning,
1992 	 * so relocate the queued buffs to the fbpt head before suspend.
1993 	 */
1994 	cio2_fbpt_rearrange(cio2, q);
1995 	q->bufs_first = 0;
1996 	q->bufs_next = 0;
1997 
1998 	return 0;
1999 }
2000 
2001 static int __maybe_unused cio2_resume(struct device *dev)
2002 {
2003 	struct cio2_device *cio2 = dev_get_drvdata(dev);
2004 	struct cio2_queue *q = cio2->cur_queue;
2005 	int r;
2006 
2007 	dev_dbg(dev, "cio2 resume\n");
2008 	if (!cio2->streaming)
2009 		return 0;
2010 	/* Start stream */
2011 	r = pm_runtime_force_resume(&cio2->pci_dev->dev);
2012 	if (r < 0) {
2013 		dev_err(&cio2->pci_dev->dev,
2014 			"failed to set power state (%d)\n", r);
2015 		return r;
2016 	}
2017 
2018 	r = cio2_hw_init(cio2, q);
2019 	if (r)
2020 		dev_err(dev, "failed to initialize cio2 hw\n");
2021 
2022 	return r;
2023 }
2024 
2025 static const struct dev_pm_ops cio2_pm_ops = {
2026 	SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
2027 	SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
2028 };
2029 
2030 static const struct pci_device_id cio2_pci_id_table[] = {
2031 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
2032 	{ }
2033 };
2034 
2035 MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
2036 
2037 static struct pci_driver cio2_pci_driver = {
2038 	.name = CIO2_NAME,
2039 	.id_table = cio2_pci_id_table,
2040 	.probe = cio2_pci_probe,
2041 	.remove = cio2_pci_remove,
2042 	.driver = {
2043 		.pm = &cio2_pm_ops,
2044 	},
2045 };
2046 
2047 module_pci_driver(cio2_pci_driver);
2048 
2049 MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
2050 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
2051 MODULE_AUTHOR("Jian Xu Zheng");
2052 MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
2053 MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
2054 MODULE_LICENSE("GPL v2");
2055 MODULE_DESCRIPTION("IPU3 CIO2 driver");
2056