1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2017,2020 Intel Corporation
4  *
5  * Based partially on Intel IPU4 driver written by
6  *  Sakari Ailus <sakari.ailus@linux.intel.com>
7  *  Samu Onkalo <samu.onkalo@intel.com>
8  *  Jouni Högander <jouni.hogander@intel.com>
9  *  Jouni Ukkonen <jouni.ukkonen@intel.com>
10  *  Antti Laakso <antti.laakso@intel.com>
11  * et al.
12  */
13 
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/module.h>
18 #include <linux/pci.h>
19 #include <linux/pfn.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/property.h>
22 #include <linux/vmalloc.h>
23 #include <media/v4l2-ctrls.h>
24 #include <media/v4l2-device.h>
25 #include <media/v4l2-event.h>
26 #include <media/v4l2-fwnode.h>
27 #include <media/v4l2-ioctl.h>
28 #include <media/videobuf2-dma-sg.h>
29 
30 #include "ipu3-cio2.h"
31 
32 struct ipu3_cio2_fmt {
33 	u32 mbus_code;
34 	u32 fourcc;
35 	u8 mipicode;
36 };
37 
38 /*
39  * These are the raw Bayer formats used in Intel's third generation of
40  * Image Processing Unit, known as IPU3.
41  * 10-bit raw Bayer, packed: 32 bytes for every 25 pixels, with the
42  * last 6 bits of each group unused.
43  */
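/*
 * As a worked example of the packing above: a group of 25 pixels at
 * 10 bits each needs 250 bits, which is rounded up to a 32-byte
 * (256-bit) unit, leaving 256 - 250 = 6 bits unused per group.
 */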
44 static const struct ipu3_cio2_fmt formats[] = {
45 	{	/* put default entry at beginning */
46 		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
47 		.fourcc		= V4L2_PIX_FMT_IPU3_SGRBG10,
48 		.mipicode	= 0x2b,
49 	}, {
50 		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
51 		.fourcc		= V4L2_PIX_FMT_IPU3_SGBRG10,
52 		.mipicode	= 0x2b,
53 	}, {
54 		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
55 		.fourcc		= V4L2_PIX_FMT_IPU3_SBGGR10,
56 		.mipicode	= 0x2b,
57 	}, {
58 		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
59 		.fourcc		= V4L2_PIX_FMT_IPU3_SRGGB10,
60 		.mipicode	= 0x2b,
61 	},
62 };
63 
64 /*
65  * cio2_find_format - lookup color format by fourcc and/or media bus code
66  * @pixelformat: fourcc to match, ignored if null
67  * @mbus_code: media bus code to match, ignored if null
68  */
69 static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
70 						    const u32 *mbus_code)
71 {
72 	unsigned int i;
73 
74 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
75 		if (pixelformat && *pixelformat != formats[i].fourcc)
76 			continue;
77 		if (mbus_code && *mbus_code != formats[i].mbus_code)
78 			continue;
79 
80 		return &formats[i];
81 	}
82 
83 	return NULL;
84 }
85 
86 static inline u32 cio2_bytesperline(const unsigned int width)
87 {
88 	/*
89 	 * 64 bytes for every 50 pixels, the line length
90 	 * in bytes is multiple of 64 (line end alignment).
91 	 */
92 	return DIV_ROUND_UP(width, 50) * 64;
93 }
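/*
 * Worked example for cio2_bytesperline(): a 4224-pixel-wide line (the
 * maximum width accepted further below) needs DIV_ROUND_UP(4224, 50) = 85
 * units of 64 bytes, i.e. 5440 bytes per line, which is naturally a
 * multiple of the 64-byte line end alignment.
 */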
94 
95 /**************** FBPT operations ****************/
96 
97 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
98 {
99 	if (cio2->dummy_lop) {
100 		dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
101 				  cio2->dummy_lop, cio2->dummy_lop_bus_addr);
102 		cio2->dummy_lop = NULL;
103 	}
104 	if (cio2->dummy_page) {
105 		dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
106 				  cio2->dummy_page, cio2->dummy_page_bus_addr);
107 		cio2->dummy_page = NULL;
108 	}
109 }
110 
111 static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
112 {
113 	unsigned int i;
114 
115 	cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
116 					      &cio2->dummy_page_bus_addr,
117 					      GFP_KERNEL);
118 	cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
119 					     &cio2->dummy_lop_bus_addr,
120 					     GFP_KERNEL);
121 	if (!cio2->dummy_page || !cio2->dummy_lop) {
122 		cio2_fbpt_exit_dummy(cio2);
123 		return -ENOMEM;
124 	}
125 	/*
126 	 * A List of Pointers (LOP) holds 1024 x 32-bit pointers, each referencing a 4 KiB page.
127 	 * Initialize every entry to the dummy_page bus base address.
128 	 */
129 	for (i = 0; i < CIO2_LOP_ENTRIES; i++)
130 		cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
131 
132 	return 0;
133 }
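/*
 * Capacity sketch for the dummy LOP above: with 1024 entries per LOP page
 * and 4 KiB per referenced page, a single LOP page can describe
 * 1024 * 4 KiB = 4 MiB of buffer space, all of which is mapped onto the
 * single dummy page here.
 */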
134 
135 static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
136 				   struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
137 {
138 	/*
139 	 * The CPU first initializes some fields in the FBPT and then sets
140 	 * the VALID bit; this barrier ensures that the DMA (device) does
141 	 * not see the VALID bit set before the other fields have been
142 	 * initialized; otherwise it could lead to havoc.
143 	 */
144 	dma_wmb();
145 
146 	/*
147 	 * Request interrupts for start and completion
148 	 * Valid bit is applicable only to 1st entry
149 	 */
150 	entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
151 		CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
152 }
153 
154 /* Initialize FBPT entries to point to the dummy frame */
155 static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
156 				       struct cio2_fbpt_entry
157 				       entry[CIO2_MAX_LOPS])
158 {
159 	unsigned int i;
160 
161 	entry[0].first_entry.first_page_offset = 0;
162 	entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
163 	entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
164 
165 	for (i = 0; i < CIO2_MAX_LOPS; i++)
166 		entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
167 
168 	cio2_fbpt_entry_enable(cio2, entry);
169 }
170 
171 /* Initialize FBPT entries to point to a given buffer */
172 static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
173 				     struct cio2_buffer *b,
174 				     struct cio2_fbpt_entry
175 				     entry[CIO2_MAX_LOPS])
176 {
177 	struct vb2_buffer *vb = &b->vbb.vb2_buf;
178 	unsigned int length = vb->planes[0].length;
179 	int remaining, i;
180 
181 	entry[0].first_entry.first_page_offset = b->offset;
182 	remaining = length + entry[0].first_entry.first_page_offset;
183 	entry[1].second_entry.num_of_pages = PFN_UP(remaining);
184 	/*
185 	 * last_page_available_bytes has the offset of the last byte in the
186 	 * last page which is still accessible by DMA. DMA cannot access
187 	 * beyond this point. Valid range for this is from 0 to 4095.
188 	 * A value of 0 means only the first byte in the page is DMA accessible.
189 	 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
190 	 * is available for DMA transfer.
191 	 */
192 	entry[1].second_entry.last_page_available_bytes =
193 			(remaining & ~PAGE_MASK) ?
194 				(remaining & ~PAGE_MASK) - 1 : PAGE_SIZE - 1;
195 	/* Fill FBPT */
196 	remaining = length;
197 	i = 0;
198 	while (remaining > 0) {
199 		entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
200 		remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
201 		entry++;
202 		i++;
203 	}
204 
205 	/*
206 	 * The first FBPT entry past the buffer is not meaningful, but it should still point to a valid LOP
207 	 */
208 	entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
209 
210 	cio2_fbpt_entry_enable(cio2, entry);
211 }
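/*
 * Worked example of the bookkeeping above, assuming a hypothetical buffer
 * of 10000 bytes starting at offset 128 into its first page:
 *   remaining = 10000 + 128 = 10128
 *   num_of_pages = PFN_UP(10128) = 3
 *   10128 & ~PAGE_MASK = 1936, so last_page_available_bytes = 1935,
 * i.e. bytes 0..1935 of the third page may still be written by the DMA.
 */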
212 
213 static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
214 {
215 	struct device *dev = &cio2->pci_dev->dev;
216 
217 	q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
218 				     GFP_KERNEL);
219 	if (!q->fbpt)
220 		return -ENOMEM;
221 
222 	return 0;
223 }
224 
225 static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
226 {
227 	dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
228 }
229 
230 /**************** CSI2 hardware setup ****************/
231 
232 /*
233  * The CSI2 receiver has several parameters affecting
234  * the receiver timings. These depend on the MIPI bus frequency
235  * F in Hz (sensor transmitter rate) as follows:
236  *     register value = (A/1e9 + B * UI) / COUNT_ACC
237  * where
238  *      UI = 1 / (2 * F) in seconds
239  *      COUNT_ACC = counter accuracy in seconds
240  *      For IPU3 COUNT_ACC = 0.0625
241  *
242  * A and B are coefficients from the table below,
243  * depending on whether the register minimum or maximum value is
244  * calculated.
245  *                                     Minimum     Maximum
246  * Clock lane                          A     B     A     B
247  * reg_rx_csi_dly_cnt_termen_clane     0     0    38     0
248  * reg_rx_csi_dly_cnt_settle_clane    95    -8   300   -16
249  * Data lanes
250  * reg_rx_csi_dly_cnt_termen_dlane0    0     0    35     4
251  * reg_rx_csi_dly_cnt_settle_dlane0   85    -2   145    -6
252  * reg_rx_csi_dly_cnt_termen_dlane1    0     0    35     4
253  * reg_rx_csi_dly_cnt_settle_dlane1   85    -2   145    -6
254  * reg_rx_csi_dly_cnt_termen_dlane2    0     0    35     4
255  * reg_rx_csi_dly_cnt_settle_dlane2   85    -2   145    -6
256  * reg_rx_csi_dly_cnt_termen_dlane3    0     0    35     4
257  * reg_rx_csi_dly_cnt_settle_dlane3   85    -2   145    -6
258  *
259  * We use the minimum values of both A and B.
260  */
261 
262 /*
263  * shift for keeping value range suitable for 32-bit integer arithmetic
264  */
265 #define LIMIT_SHIFT	8
266 
267 static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
268 {
269 	const u32 accinv = 16; /* inverse of the counter resolution */
270 	const u32 uiinv = 500000000; /* 1e9 / 2 */
271 	s32 r;
272 
273 	freq >>= LIMIT_SHIFT;
274 
275 	if (WARN_ON(freq <= 0 || freq > S32_MAX))
276 		return def;
277 	/*
278 	 * b could be 0, -2 or -8, so |accinv * b| is always
279 	 * less than (1 << LIMIT_SHIFT) and thus |r| < 500000000.
280 	 */
281 	r = accinv * b * (uiinv >> LIMIT_SHIFT);
282 	r = r / (s32)freq;
283 	/* max value of a is 95 */
284 	r += accinv * a;
285 
286 	return r;
287 };
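/*
 * A sketch of the arithmetic, assuming an example link frequency of
 * 456 MHz (the real value is read from V4L2_CID_LINK_FREQ at run time):
 * UI = 1 / (2 * 456 MHz) ~= 1.096 ns, so for the clock lane settle
 * minimum (a = 95, b = -8) the function returns roughly
 * 16 * (95 - 8 * 1.096) ~= 1380, the value later written into the settle
 * delay register.
 */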
288 
289 /* Calculate the termination enable and settle delay values for the CSI-2 receiver */
290 static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
291 				 struct cio2_csi2_timing *timing)
292 {
293 	struct device *dev = &cio2->pci_dev->dev;
294 	struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ };
295 	struct v4l2_ctrl *link_freq;
296 	s64 freq;
297 	int r;
298 
299 	if (!q->sensor)
300 		return -ENODEV;
301 
302 	link_freq = v4l2_ctrl_find(q->sensor->ctrl_handler, V4L2_CID_LINK_FREQ);
303 	if (!link_freq) {
304 		dev_err(dev, "failed to find LINK_FREQ\n");
305 		return -EPIPE;
306 	}
307 
308 	qm.index = v4l2_ctrl_g_ctrl(link_freq);
309 	r = v4l2_querymenu(q->sensor->ctrl_handler, &qm);
310 	if (r) {
311 		dev_err(dev, "failed to get menu item\n");
312 		return r;
313 	}
314 
315 	if (!qm.value) {
316 		dev_err(dev, "error invalid link_freq\n");
317 		return -EINVAL;
318 	}
319 	freq = qm.value;
320 
321 	timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
322 					    CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
323 					    freq,
324 					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
325 	timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
326 					    CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
327 					    freq,
328 					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
329 	timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
330 					    CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
331 					    freq,
332 					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
333 	timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
334 					    CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
335 					    freq,
336 					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
337 
338 	dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
339 	dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
340 	dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
341 	dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
342 
343 	return 0;
344 };
345 
346 static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
347 {
348 	static const int NUM_VCS = 4;
349 	static const int SID;	/* Stream id */
350 	static const int ENTRY;
351 	static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
352 					CIO2_FBPT_SUBENTRY_UNIT);
353 	const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
354 	const struct ipu3_cio2_fmt *fmt;
355 	void __iomem *const base = cio2->base;
356 	u8 lanes, csi2bus = q->csi2.port;
357 	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
358 	struct cio2_csi2_timing timing;
359 	int i, r;
360 
361 	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
362 	if (!fmt)
363 		return -EINVAL;
364 
365 	lanes = q->csi2.lanes;
366 
367 	r = cio2_csi2_calc_timing(cio2, q, &timing);
368 	if (r)
369 		return r;
370 
371 	writel(timing.clk_termen, q->csi_rx_base +
372 		CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
373 	writel(timing.clk_settle, q->csi_rx_base +
374 		CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
375 
376 	for (i = 0; i < lanes; i++) {
377 		writel(timing.dat_termen, q->csi_rx_base +
378 			CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
379 		writel(timing.dat_settle, q->csi_rx_base +
380 			CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
381 	}
382 
383 	writel(CIO2_PBM_WMCTRL1_MIN_2CK |
384 	       CIO2_PBM_WMCTRL1_MID1_2CK |
385 	       CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
386 	writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
387 	       CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
388 	       CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
389 	       CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
390 	       CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
391 	       CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
392 	writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
393 	       CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
394 	       CIO2_PBM_ARB_CTRL_LE_EN |
395 	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
396 	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
397 	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
398 	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
399 	       base + CIO2_REG_PBM_ARB_CTRL);
400 	writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
401 	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
402 	writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
403 	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
404 
405 	writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
406 	writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
407 
408 	/* Configure MIPI backend */
409 	for (i = 0; i < NUM_VCS; i++)
410 		writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
411 
412 	/* There are 16 short packet LUT entries */
413 	for (i = 0; i < 16; i++)
414 		writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
415 		       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
416 	writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
417 	       q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
418 
419 	writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
420 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
421 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
422 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
423 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
424 	writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
425 
426 	writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
427 	       CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
428 	       base + CIO2_REG_INT_EN);
429 
430 	writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
431 	       << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
432 	       base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
433 	writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
434 	       sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
435 	       fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
436 	       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
437 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
438 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
439 	writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
440 
441 	writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
442 	writel(CIO2_CGC_PRIM_TGE |
443 	       CIO2_CGC_SIDE_TGE |
444 	       CIO2_CGC_XOSC_TGE |
445 	       CIO2_CGC_D3I3_TGE |
446 	       CIO2_CGC_CSI2_INTERFRAME_TGE |
447 	       CIO2_CGC_CSI2_PORT_DCGE |
448 	       CIO2_CGC_SIDE_DCGE |
449 	       CIO2_CGC_PRIM_DCGE |
450 	       CIO2_CGC_ROSC_DCGE |
451 	       CIO2_CGC_XOSC_DCGE |
452 	       CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
453 	       CIO2_CGC_CSI_CLKGATE_HOLDOFF
454 	       << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
455 	writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
456 	writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
457 	       CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
458 	       CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
459 	       CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
460 	       base + CIO2_REG_LTRVAL01);
461 	writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
462 	       CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
463 	       CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
464 	       CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
465 	       base + CIO2_REG_LTRVAL23);
466 
467 	for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
468 		writel(0, base + CIO2_REG_CDMABA(i));
469 		writel(0, base + CIO2_REG_CDMAC0(i));
470 		writel(0, base + CIO2_REG_CDMAC1(i));
471 	}
472 
473 	/* Enable DMA */
474 	writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
475 
476 	writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
477 	       FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
478 	       CIO2_CDMAC0_DMA_INTR_ON_FE |
479 	       CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
480 	       CIO2_CDMAC0_DMA_EN |
481 	       CIO2_CDMAC0_DMA_INTR_ON_FS |
482 	       CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
483 
484 	writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
485 	       base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
486 
487 	writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
488 
489 	writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
490 	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
491 	       CIO2_PXM_FRF_CFG_MSK_ECC_RE |
492 	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
493 	       base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
494 
495 	/* Clear interrupts */
496 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
497 	writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
498 	writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
499 	writel(~0, base + CIO2_REG_INT_STS);
500 
501 	/* Enable devices, starting from the last device in the pipe */
502 	writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
503 	writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
504 
505 	return 0;
506 }
507 
508 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
509 {
510 	void __iomem *const base = cio2->base;
511 	unsigned int i;
512 	u32 value;
513 	int ret;
514 
515 	/* Disable CSI receiver and MIPI backend devices */
516 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
517 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
518 	writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
519 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
520 
521 	/* Halt DMA */
522 	writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
523 	ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
524 				 value, value & CIO2_CDMAC0_DMA_HALTED,
525 				 4000, 2000000);
526 	if (ret)
527 		dev_err(&cio2->pci_dev->dev,
528 			"DMA %i cannot be halted\n", CIO2_DMA_CHAN);
529 
530 	for (i = 0; i < CIO2_NUM_PORTS; i++) {
531 		writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
532 		       CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
533 		writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
534 		       CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
535 	}
536 }
537 
538 static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
539 {
540 	struct device *dev = &cio2->pci_dev->dev;
541 	struct cio2_queue *q = cio2->cur_queue;
542 	struct cio2_fbpt_entry *entry;
543 	u64 ns = ktime_get_ns();
544 
545 	if (dma_chan >= CIO2_QUEUES) {
546 		dev_err(dev, "bad DMA channel %i\n", dma_chan);
547 		return;
548 	}
549 
550 	entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
551 	if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
552 		dev_warn(&cio2->pci_dev->dev,
553 			 "no ready buffers found on DMA channel %u\n",
554 			 dma_chan);
555 		return;
556 	}
557 
558 	/* Find out which buffer(s) are ready */
559 	do {
560 		struct cio2_buffer *b;
561 
562 		b = q->bufs[q->bufs_first];
563 		if (b) {
564 			unsigned int bytes = entry[1].second_entry.num_of_bytes;
565 
566 			q->bufs[q->bufs_first] = NULL;
567 			atomic_dec(&q->bufs_queued);
568 			dev_dbg(&cio2->pci_dev->dev,
569 				"buffer %i done\n", b->vbb.vb2_buf.index);
570 
571 			b->vbb.vb2_buf.timestamp = ns;
572 			b->vbb.field = V4L2_FIELD_NONE;
573 			b->vbb.sequence = atomic_read(&q->frame_sequence);
574 			if (b->vbb.vb2_buf.planes[0].length != bytes)
575 				dev_warn(dev, "buffer length is %d, received %d\n",
576 					 b->vbb.vb2_buf.planes[0].length,
577 					 bytes);
578 			vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
579 		}
580 		atomic_inc(&q->frame_sequence);
581 		cio2_fbpt_entry_init_dummy(cio2, entry);
582 		q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
583 		entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
584 	} while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
585 }
586 
587 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
588 {
589 	/*
590 	 * For the user space camera control algorithms it is essential
591 	 * to know when the reception of a frame has begun. That's often
592 	 * the best timing information to get from the hardware.
593 	 */
594 	struct v4l2_event event = {
595 		.type = V4L2_EVENT_FRAME_SYNC,
596 		.u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
597 	};
598 
599 	v4l2_event_queue(q->subdev.devnode, &event);
600 }
601 
602 static const char *const cio2_irq_errs[] = {
603 	"single packet header error corrected",
604 	"multiple packet header errors detected",
605 	"payload checksum (CRC) error",
606 	"fifo overflow",
607 	"reserved short packet data type detected",
608 	"reserved long packet data type detected",
609 	"incomplete long packet detected",
610 	"frame sync error",
611 	"line sync error",
612 	"DPHY start of transmission error",
613 	"DPHY synchronization error",
614 	"escape mode error",
615 	"escape mode trigger event",
616 	"escape mode ultra-low power state for data lane(s)",
617 	"escape mode ultra-low power state exit for clock lane",
618 	"inter-frame short packet discarded",
619 	"inter-frame long packet discarded",
620 	"non-matching Long Packet stalled",
621 };
622 
623 static const char *const cio2_port_errs[] = {
624 	"ECC recoverable",
625 	"DPHY not recoverable",
626 	"ECC not recoverable",
627 	"CRC error",
628 	"INTERFRAMEDATA",
629 	"PKT2SHORT",
630 	"PKT2LONG",
631 };
632 
633 static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
634 {
635 	void __iomem *const base = cio2->base;
636 	struct device *dev = &cio2->pci_dev->dev;
637 
638 	if (int_status & CIO2_INT_IOOE) {
639 		/*
640 		 * Interrupt on Output Error:
641 		 * 1) SRAM is full and FS received, or
642 		 * 2) An invalid bit detected by DMA.
643 		 */
644 		u32 oe_status, oe_clear;
645 
646 		oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
647 		oe_status = oe_clear;
648 
649 		if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
650 			dev_err(dev, "DMA output error: 0x%x\n",
651 				(oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
652 				>> CIO2_INT_EXT_OE_DMAOE_SHIFT);
653 			oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
654 		}
655 		if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
656 			dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
657 				(oe_status & CIO2_INT_EXT_OE_OES_MASK)
658 				>> CIO2_INT_EXT_OE_OES_SHIFT);
659 			oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
660 		}
661 		writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
662 		if (oe_status)
663 			dev_warn(dev, "unknown interrupt 0x%x on OE\n",
664 				 oe_status);
665 		int_status &= ~CIO2_INT_IOOE;
666 	}
667 
668 	if (int_status & CIO2_INT_IOC_MASK) {
669 		/* DMA IO done -- frame ready */
670 		u32 clr = 0;
671 		unsigned int d;
672 
673 		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
674 			if (int_status & CIO2_INT_IOC(d)) {
675 				clr |= CIO2_INT_IOC(d);
676 				cio2_buffer_done(cio2, d);
677 			}
678 		int_status &= ~clr;
679 	}
680 
681 	if (int_status & CIO2_INT_IOS_IOLN_MASK) {
682 		/* DMA IO starts or reached specified line */
683 		u32 clr = 0;
684 		unsigned int d;
685 
686 		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
687 			if (int_status & CIO2_INT_IOS_IOLN(d)) {
688 				clr |= CIO2_INT_IOS_IOLN(d);
689 				if (d == CIO2_DMA_CHAN)
690 					cio2_queue_event_sof(cio2,
691 							     cio2->cur_queue);
692 			}
693 		int_status &= ~clr;
694 	}
695 
696 	if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
697 		/* CSI2 receiver (error) interrupt */
698 		u32 ie_status, ie_clear;
699 		unsigned int port;
700 
701 		ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
702 		ie_status = ie_clear;
703 
704 		for (port = 0; port < CIO2_NUM_PORTS; port++) {
705 			u32 port_status = (ie_status >> (port * 8)) & 0xff;
706 			u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
707 			void __iomem *const csi_rx_base =
708 						base + CIO2_REG_PIPE_BASE(port);
709 			unsigned int i;
710 
711 			while (port_status & err_mask) {
712 				i = ffs(port_status) - 1;
713 				dev_err(dev, "port %i error %s\n",
714 					port, cio2_port_errs[i]);
715 				ie_status &= ~BIT(port * 8 + i);
716 				port_status &= ~BIT(i);
717 			}
718 
719 			if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
720 				u32 csi2_status, csi2_clear;
721 
722 				csi2_status = readl(csi_rx_base +
723 						CIO2_REG_IRQCTRL_STATUS);
724 				csi2_clear = csi2_status;
725 				err_mask =
726 					BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;
727 
728 				while (csi2_status & err_mask) {
729 					i = ffs(csi2_status) - 1;
730 					dev_err(dev,
731 						"CSI-2 receiver port %i: %s\n",
732 							port, cio2_irq_errs[i]);
733 					csi2_status &= ~BIT(i);
734 				}
735 
736 				writel(csi2_clear,
737 				       csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
738 				if (csi2_status)
739 					dev_warn(dev,
740 						 "unknown CSI2 error 0x%x on port %i\n",
741 						 csi2_status, port);
742 
743 				ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
744 			}
745 		}
746 
747 		writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
748 		if (ie_status)
749 			dev_warn(dev, "unknown interrupt 0x%x on IE\n",
750 				 ie_status);
751 
752 		int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
753 	}
754 
755 	if (int_status)
756 		dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
757 }
758 
759 static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
760 {
761 	struct cio2_device *cio2 = cio2_ptr;
762 	void __iomem *const base = cio2->base;
763 	struct device *dev = &cio2->pci_dev->dev;
764 	u32 int_status;
765 
766 	int_status = readl(base + CIO2_REG_INT_STS);
767 	dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
768 	if (!int_status)
769 		return IRQ_NONE;
770 
771 	do {
772 		writel(int_status, base + CIO2_REG_INT_STS);
773 		cio2_irq_handle_once(cio2, int_status);
774 		int_status = readl(base + CIO2_REG_INT_STS);
775 		if (int_status)
776 			dev_dbg(dev, "pending status 0x%x\n", int_status);
777 	} while (int_status);
778 
779 	return IRQ_HANDLED;
780 }
781 
782 /**************** Videobuf2 interface ****************/
783 
784 static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
785 					enum vb2_buffer_state state)
786 {
787 	unsigned int i;
788 
789 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
790 		if (q->bufs[i]) {
791 			atomic_dec(&q->bufs_queued);
792 			vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
793 					state);
794 			q->bufs[i] = NULL;
795 		}
796 	}
797 }
798 
799 static int cio2_vb2_queue_setup(struct vb2_queue *vq,
800 				unsigned int *num_buffers,
801 				unsigned int *num_planes,
802 				unsigned int sizes[],
803 				struct device *alloc_devs[])
804 {
805 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
806 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
807 	unsigned int i;
808 
809 	*num_planes = q->format.num_planes;
810 
811 	for (i = 0; i < *num_planes; ++i) {
812 		sizes[i] = q->format.plane_fmt[i].sizeimage;
813 		alloc_devs[i] = &cio2->pci_dev->dev;
814 	}
815 
816 	*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
817 
818 	/* Initialize buffer queue */
819 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
820 		q->bufs[i] = NULL;
821 		cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
822 	}
823 	atomic_set(&q->bufs_queued, 0);
824 	q->bufs_first = 0;
825 	q->bufs_next = 0;
826 
827 	return 0;
828 }
829 
830 /* Called after each buffer is allocated */
831 static int cio2_vb2_buf_init(struct vb2_buffer *vb)
832 {
833 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
834 	struct device *dev = &cio2->pci_dev->dev;
835 	struct cio2_buffer *b =
836 		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
837 	unsigned int pages = PFN_UP(vb->planes[0].length);
838 	unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
839 	struct sg_table *sg;
840 	struct sg_dma_page_iter sg_iter;
841 	unsigned int i, j;
842 
843 	if (lops <= 0 || lops > CIO2_MAX_LOPS) {
844 		dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
845 			vb->planes[0].length);
846 		return -ENOSPC;		/* Should never happen */
847 	}
848 
849 	memset(b->lop, 0, sizeof(b->lop));
850 	/* Allocate LOP table */
851 	for (i = 0; i < lops; i++) {
852 		b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
853 					       &b->lop_bus_addr[i], GFP_KERNEL);
854 		if (!b->lop[i])
855 			goto fail;
856 	}
857 
858 	/* Fill LOP */
859 	sg = vb2_dma_sg_plane_desc(vb, 0);
860 	if (!sg)
861 		return -ENOMEM;
862 
863 	if (sg->nents && sg->sgl)
864 		b->offset = sg->sgl->offset;
865 
866 	i = j = 0;
867 	for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
868 		if (!pages--)
869 			break;
870 		b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
871 		j++;
872 		if (j == CIO2_LOP_ENTRIES) {
873 			i++;
874 			j = 0;
875 		}
876 	}
877 
878 	b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
879 	return 0;
880 fail:
881 	while (i--)
882 		dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
883 	return -ENOMEM;
884 }
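/*
 * Sizing sketch, assuming the maximum 4224x3136 frame in the packed
 * 10-bit format: sizeimage = 5440 * 3136 = 17059840 bytes, so
 *   pages = PFN_UP(17059840) = 4165
 *   lops  = DIV_ROUND_UP(4165 + 1, 1024) = 5
 * where 1024 is the number of pointers per LOP page and the extra entry
 * accounts for the trailing dummy page pointer.
 */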
885 
886 /* Transfer buffer ownership to cio2 */
887 static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
888 {
889 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
890 	struct cio2_queue *q =
891 		container_of(vb->vb2_queue, struct cio2_queue, vbq);
892 	struct cio2_buffer *b =
893 		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
894 	struct cio2_fbpt_entry *entry;
895 	unsigned long flags;
896 	unsigned int i, j, next = q->bufs_next;
897 	int bufs_queued = atomic_inc_return(&q->bufs_queued);
898 	u32 fbpt_rp;
899 
900 	dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index);
901 
902 	/*
903 	 * This code queues the buffer to the CIO2 DMA engine, which starts
904 	 * running once streaming has started. It is possible that this code
905 	 * gets preempted due to increased CPU load. If that happens, the driver
906 	 * does not get an opportunity to queue new buffers to the CIO2 DMA
907 	 * engine. When the DMA engine encounters an FBPT entry without the
908 	 * VALID bit set, the DMA engine halts, which requires a restart of
909 	 * the DMA engine and sensor, to continue streaming.
910 	 * This is not desired and is highly unlikely given that there are
911 	 * 32 FBPT entries that the DMA engine needs to process before it
912 	 * runs into an FBPT entry without the VALID bit set. We try to mitigate this
913 	 * by disabling interrupts for the duration of this queueing.
914 	 */
915 	local_irq_save(flags);
916 
917 	fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
918 		   >> CIO2_CDMARI_FBPT_RP_SHIFT)
919 		   & CIO2_CDMARI_FBPT_RP_MASK;
920 
921 	/*
922 	 * fbpt_rp is the fbpt entry that the dma is currently working
923 	 * on, but since it could jump to the next entry at any time,
924 	 * assume that we might already be there.
925 	 */
926 	fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
927 
928 	if (bufs_queued <= 1 || fbpt_rp == next)
929 		/* Buffers were drained */
930 		next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
931 
932 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
933 		/*
934 		 * We have allocated CIO2_MAX_BUFFERS circularly for the
935 		 * hw, while the user has requested a queue of N buffers. The
936 		 * driver ensures N <= CIO2_MAX_BUFFERS and guarantees that
937 		 * whenever the user queues a buffer, a free slot is available.
938 		 */
939 		if (!q->bufs[next]) {
940 			q->bufs[next] = b;
941 			entry = &q->fbpt[next * CIO2_MAX_LOPS];
942 			cio2_fbpt_entry_init_buf(cio2, b, entry);
943 			local_irq_restore(flags);
944 			q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
945 			for (j = 0; j < vb->num_planes; j++)
946 				vb2_set_plane_payload(vb, j,
947 					q->format.plane_fmt[j].sizeimage);
948 			return;
949 		}
950 
951 		dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next);
952 		next = (next + 1) % CIO2_MAX_BUFFERS;
953 	}
954 
955 	local_irq_restore(flags);
956 	dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n");
957 	atomic_dec(&q->bufs_queued);
958 	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
959 }
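/*
 * Example of the read-pointer handling above with the 32 FBPT entries
 * mentioned in the comment: if CDMARI reports fbpt_rp = 31, the driver
 * assumes the DMA may already have advanced to entry (31 + 1) % 32 = 0,
 * and if the queue had been drained the new buffer is placed at entry 1,
 * one slot ahead of the engine.
 */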
960 
961 /* Called when each buffer is freed */
962 static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
963 {
964 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
965 	struct cio2_buffer *b =
966 		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
967 	unsigned int i;
968 
969 	/* Free LOP table */
970 	for (i = 0; i < CIO2_MAX_LOPS; i++) {
971 		if (b->lop[i])
972 			dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
973 					  b->lop[i], b->lop_bus_addr[i]);
974 	}
975 }
976 
977 static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
978 {
979 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
980 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
981 	int r;
982 
983 	cio2->cur_queue = q;
984 	atomic_set(&q->frame_sequence, 0);
985 
986 	r = pm_runtime_get_sync(&cio2->pci_dev->dev);
987 	if (r < 0) {
988 		dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
989 		pm_runtime_put_noidle(&cio2->pci_dev->dev);
990 		return r;
991 	}
992 
993 	r = media_pipeline_start(&q->vdev.entity, &q->pipe);
994 	if (r)
995 		goto fail_pipeline;
996 
997 	r = cio2_hw_init(cio2, q);
998 	if (r)
999 		goto fail_hw;
1000 
1001 	/* Start streaming on sensor */
1002 	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
1003 	if (r)
1004 		goto fail_csi2_subdev;
1005 
1006 	cio2->streaming = true;
1007 
1008 	return 0;
1009 
1010 fail_csi2_subdev:
1011 	cio2_hw_exit(cio2, q);
1012 fail_hw:
1013 	media_pipeline_stop(&q->vdev.entity);
1014 fail_pipeline:
1015 	dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r);
1016 	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
1017 	pm_runtime_put(&cio2->pci_dev->dev);
1018 
1019 	return r;
1020 }
1021 
1022 static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
1023 {
1024 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
1025 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
1026 
1027 	if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
1028 		dev_err(&cio2->pci_dev->dev,
1029 			"failed to stop sensor streaming\n");
1030 
1031 	cio2_hw_exit(cio2, q);
1032 	synchronize_irq(cio2->pci_dev->irq);
1033 	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
1034 	media_pipeline_stop(&q->vdev.entity);
1035 	pm_runtime_put(&cio2->pci_dev->dev);
1036 	cio2->streaming = false;
1037 }
1038 
1039 static const struct vb2_ops cio2_vb2_ops = {
1040 	.buf_init = cio2_vb2_buf_init,
1041 	.buf_queue = cio2_vb2_buf_queue,
1042 	.buf_cleanup = cio2_vb2_buf_cleanup,
1043 	.queue_setup = cio2_vb2_queue_setup,
1044 	.start_streaming = cio2_vb2_start_streaming,
1045 	.stop_streaming = cio2_vb2_stop_streaming,
1046 	.wait_prepare = vb2_ops_wait_prepare,
1047 	.wait_finish = vb2_ops_wait_finish,
1048 };
1049 
1050 /**************** V4L2 interface ****************/
1051 
1052 static int cio2_v4l2_querycap(struct file *file, void *fh,
1053 			      struct v4l2_capability *cap)
1054 {
1055 	struct cio2_device *cio2 = video_drvdata(file);
1056 
1057 	strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
1058 	strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
1059 	snprintf(cap->bus_info, sizeof(cap->bus_info),
1060 		 "PCI:%s", pci_name(cio2->pci_dev));
1061 
1062 	return 0;
1063 }
1064 
1065 static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
1066 			      struct v4l2_fmtdesc *f)
1067 {
1068 	if (f->index >= ARRAY_SIZE(formats))
1069 		return -EINVAL;
1070 
1071 	f->pixelformat = formats[f->index].fourcc;
1072 
1073 	return 0;
1074 }
1075 
1076 /* The format is validated in cio2_video_link_validate() */
1077 static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
1078 {
1079 	struct cio2_queue *q = file_to_cio2_queue(file);
1080 
1081 	f->fmt.pix_mp = q->format;
1082 
1083 	return 0;
1084 }
1085 
1086 static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
1087 {
1088 	const struct ipu3_cio2_fmt *fmt;
1089 	struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
1090 
1091 	fmt = cio2_find_format(&mpix->pixelformat, NULL);
1092 	if (!fmt)
1093 		fmt = &formats[0];
1094 
1095 	/* Only supports up to 4224x3136 */
1096 	if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
1097 		mpix->width = CIO2_IMAGE_MAX_WIDTH;
1098 	if (mpix->height > CIO2_IMAGE_MAX_LENGTH)
1099 		mpix->height = CIO2_IMAGE_MAX_LENGTH;
1100 
1101 	mpix->num_planes = 1;
1102 	mpix->pixelformat = fmt->fourcc;
1103 	mpix->colorspace = V4L2_COLORSPACE_RAW;
1104 	mpix->field = V4L2_FIELD_NONE;
1105 	memset(mpix->reserved, 0, sizeof(mpix->reserved));
1106 	mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
1107 	mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
1108 							mpix->height;
1109 	memset(mpix->plane_fmt[0].reserved, 0,
1110 	       sizeof(mpix->plane_fmt[0].reserved));
1111 
1112 	/* use default */
1113 	mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1114 	mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
1115 	mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
1116 
1117 	return 0;
1118 }
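/*
 * For instance, a TRY_FMT request for 4224x3136 in any of the supported
 * Bayer orders resolves to bytesperline = cio2_bytesperline(4224) = 5440
 * and sizeimage = 5440 * 3136 = 17059840 bytes in a single plane.
 */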
1119 
1120 static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
1121 {
1122 	struct cio2_queue *q = file_to_cio2_queue(file);
1123 
1124 	cio2_v4l2_try_fmt(file, fh, f);
1125 	q->format = f->fmt.pix_mp;
1126 
1127 	return 0;
1128 }
1129 
1130 static int
1131 cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1132 {
1133 	if (input->index > 0)
1134 		return -EINVAL;
1135 
1136 	strscpy(input->name, "camera", sizeof(input->name));
1137 	input->type = V4L2_INPUT_TYPE_CAMERA;
1138 
1139 	return 0;
1140 }
1141 
1142 static int
1143 cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
1144 {
1145 	*input = 0;
1146 
1147 	return 0;
1148 }
1149 
1150 static int
1151 cio2_video_s_input(struct file *file, void *fh, unsigned int input)
1152 {
1153 	return input == 0 ? 0 : -EINVAL;
1154 }
1155 
1156 static const struct v4l2_file_operations cio2_v4l2_fops = {
1157 	.owner = THIS_MODULE,
1158 	.unlocked_ioctl = video_ioctl2,
1159 	.open = v4l2_fh_open,
1160 	.release = vb2_fop_release,
1161 	.poll = vb2_fop_poll,
1162 	.mmap = vb2_fop_mmap,
1163 };
1164 
1165 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
1166 	.vidioc_querycap = cio2_v4l2_querycap,
1167 	.vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
1168 	.vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
1169 	.vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
1170 	.vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
1171 	.vidioc_reqbufs = vb2_ioctl_reqbufs,
1172 	.vidioc_create_bufs = vb2_ioctl_create_bufs,
1173 	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1174 	.vidioc_querybuf = vb2_ioctl_querybuf,
1175 	.vidioc_qbuf = vb2_ioctl_qbuf,
1176 	.vidioc_dqbuf = vb2_ioctl_dqbuf,
1177 	.vidioc_streamon = vb2_ioctl_streamon,
1178 	.vidioc_streamoff = vb2_ioctl_streamoff,
1179 	.vidioc_expbuf = vb2_ioctl_expbuf,
1180 	.vidioc_enum_input = cio2_video_enum_input,
1181 	.vidioc_g_input	= cio2_video_g_input,
1182 	.vidioc_s_input	= cio2_video_s_input,
1183 };
1184 
1185 static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
1186 				       struct v4l2_fh *fh,
1187 				       struct v4l2_event_subscription *sub)
1188 {
1189 	if (sub->type != V4L2_EVENT_FRAME_SYNC)
1190 		return -EINVAL;
1191 
1192 	/* Line number. For now, only zero is accepted. */
1193 	if (sub->id != 0)
1194 		return -EINVAL;
1195 
1196 	return v4l2_event_subscribe(fh, sub, 0, NULL);
1197 }
1198 
1199 static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1200 {
1201 	struct v4l2_mbus_framefmt *format;
1202 	const struct v4l2_mbus_framefmt fmt_default = {
1203 		.width = 1936,
1204 		.height = 1096,
1205 		.code = formats[0].mbus_code,
1206 		.field = V4L2_FIELD_NONE,
1207 		.colorspace = V4L2_COLORSPACE_RAW,
1208 		.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
1209 		.quantization = V4L2_QUANTIZATION_DEFAULT,
1210 		.xfer_func = V4L2_XFER_FUNC_DEFAULT,
1211 	};
1212 
1213 	/* Initialize try_fmt */
1214 	format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SINK);
1215 	*format = fmt_default;
1216 
1217 	/* same as sink */
1218 	format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SOURCE);
1219 	*format = fmt_default;
1220 
1221 	return 0;
1222 }
1223 
1224 /*
1225  * cio2_subdev_get_fmt - Handle get format by pads subdev method
1226  * @sd : pointer to v4l2 subdev structure
1227  * @cfg: V4L2 subdev pad config
1228  * @fmt: pointer to v4l2 subdev format structure
1229  * return -EINVAL or zero on success
1230  */
1231 static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
1232 			       struct v4l2_subdev_pad_config *cfg,
1233 			       struct v4l2_subdev_format *fmt)
1234 {
1235 	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1236 
1237 	mutex_lock(&q->subdev_lock);
1238 
1239 	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1240 		fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
1241 	else
1242 		fmt->format = q->subdev_fmt;
1243 
1244 	mutex_unlock(&q->subdev_lock);
1245 
1246 	return 0;
1247 }
1248 
1249 /*
1250  * cio2_subdev_set_fmt - Handle set format by pads subdev method
1251  * @sd : pointer to v4l2 subdev structure
1252  * @cfg: V4L2 subdev pad config
1253  * @fmt: pointer to v4l2 subdev format structure
1254  * return -EINVAL or zero on success
1255  */
1256 static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
1257 			       struct v4l2_subdev_pad_config *cfg,
1258 			       struct v4l2_subdev_format *fmt)
1259 {
1260 	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1261 	struct v4l2_mbus_framefmt *mbus;
1262 	u32 mbus_code = fmt->format.code;
1263 	unsigned int i;
1264 
1265 	/*
1266 	 * Only allow setting sink pad format;
1267 	 * source always propagates from sink
1268 	 */
1269 	if (fmt->pad == CIO2_PAD_SOURCE)
1270 		return cio2_subdev_get_fmt(sd, cfg, fmt);
1271 
1272 	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1273 		mbus = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
1274 	else
1275 		mbus = &q->subdev_fmt;
1276 
1277 	fmt->format.code = formats[0].mbus_code;
1278 
1279 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
1280 		if (formats[i].mbus_code == mbus_code) {
1281 			fmt->format.code = mbus_code;
1282 			break;
1283 		}
1284 	}
1285 
1286 	fmt->format.width = min_t(u32, fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
1287 	fmt->format.height = min_t(u32, fmt->format.height,
1288 				   CIO2_IMAGE_MAX_LENGTH);
1289 	fmt->format.field = V4L2_FIELD_NONE;
1290 
1291 	mutex_lock(&q->subdev_lock);
1292 	*mbus = fmt->format;
1293 	mutex_unlock(&q->subdev_lock);
1294 
1295 	return 0;
1296 }
1297 
1298 static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
1299 				      struct v4l2_subdev_pad_config *cfg,
1300 				      struct v4l2_subdev_mbus_code_enum *code)
1301 {
1302 	if (code->index >= ARRAY_SIZE(formats))
1303 		return -EINVAL;
1304 
1305 	code->code = formats[code->index].mbus_code;
1306 	return 0;
1307 }
1308 
1309 static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
1310 						struct v4l2_subdev_format *fmt)
1311 {
1312 	if (is_media_entity_v4l2_subdev(pad->entity)) {
1313 		struct v4l2_subdev *sd =
1314 			media_entity_to_v4l2_subdev(pad->entity);
1315 
1316 		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1317 		fmt->pad = pad->index;
1318 		return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
1319 	}
1320 
1321 	return -EINVAL;
1322 }
1323 
1324 static int cio2_video_link_validate(struct media_link *link)
1325 {
1326 	struct video_device *vd = container_of(link->sink->entity,
1327 						struct video_device, entity);
1328 	struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
1329 	struct cio2_device *cio2 = video_get_drvdata(vd);
1330 	struct v4l2_subdev_format source_fmt;
1331 	int ret;
1332 
1333 	if (!media_entity_remote_pad(link->sink->entity->pads)) {
1334 		dev_info(&cio2->pci_dev->dev,
1335 			 "video node %s pad not connected\n", vd->name);
1336 		return -ENOTCONN;
1337 	}
1338 
1339 	ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
1340 	if (ret < 0)
1341 		return 0;
1342 
1343 	if (source_fmt.format.width != q->format.width ||
1344 	    source_fmt.format.height != q->format.height) {
1345 		dev_err(&cio2->pci_dev->dev,
1346 			"Wrong width or height %ux%u (%ux%u expected)\n",
1347 			q->format.width, q->format.height,
1348 			source_fmt.format.width, source_fmt.format.height);
1349 		return -EINVAL;
1350 	}
1351 
1352 	if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
1353 		return -EINVAL;
1354 
1355 	return 0;
1356 }
1357 
1358 static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
1359 	.subscribe_event = cio2_subdev_subscribe_event,
1360 	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
1361 };
1362 
1363 static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
1364 	.open = cio2_subdev_open,
1365 };
1366 
1367 static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
1368 	.link_validate = v4l2_subdev_link_validate_default,
1369 	.get_fmt = cio2_subdev_get_fmt,
1370 	.set_fmt = cio2_subdev_set_fmt,
1371 	.enum_mbus_code = cio2_subdev_enum_mbus_code,
1372 };
1373 
1374 static const struct v4l2_subdev_ops cio2_subdev_ops = {
1375 	.core = &cio2_subdev_core_ops,
1376 	.pad = &cio2_subdev_pad_ops,
1377 };
1378 
1379 /******* V4L2 sub-device asynchronous registration callbacks ***********/
1380 
1381 struct sensor_async_subdev {
1382 	struct v4l2_async_subdev asd;
1383 	struct csi2_bus_info csi2;
1384 };
1385 
1386 /* The .bound() notifier callback when a match is found */
1387 static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
1388 			       struct v4l2_subdev *sd,
1389 			       struct v4l2_async_subdev *asd)
1390 {
1391 	struct cio2_device *cio2 = container_of(notifier,
1392 					struct cio2_device, notifier);
1393 	struct sensor_async_subdev *s_asd = container_of(asd,
1394 					struct sensor_async_subdev, asd);
1395 	struct cio2_queue *q;
1396 
1397 	if (cio2->queue[s_asd->csi2.port].sensor)
1398 		return -EBUSY;
1399 
1400 	q = &cio2->queue[s_asd->csi2.port];
1401 
1402 	q->csi2 = s_asd->csi2;
1403 	q->sensor = sd;
1404 	q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
1405 
1406 	return 0;
1407 }
1408 
1409 /* The .unbind callback */
1410 static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
1411 				 struct v4l2_subdev *sd,
1412 				 struct v4l2_async_subdev *asd)
1413 {
1414 	struct cio2_device *cio2 = container_of(notifier,
1415 						struct cio2_device, notifier);
1416 	struct sensor_async_subdev *s_asd = container_of(asd,
1417 					struct sensor_async_subdev, asd);
1418 
1419 	cio2->queue[s_asd->csi2.port].sensor = NULL;
1420 }
1421 
1422 /* .complete() is called after all subdevices have been located */
1423 static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
1424 {
1425 	struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
1426 						notifier);
1427 	struct sensor_async_subdev *s_asd;
1428 	struct v4l2_async_subdev *asd;
1429 	struct cio2_queue *q;
1430 	unsigned int pad;
1431 	int ret;
1432 
1433 	list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
1434 		s_asd = container_of(asd, struct sensor_async_subdev, asd);
1435 		q = &cio2->queue[s_asd->csi2.port];
1436 
1437 		for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
1438 			if (q->sensor->entity.pads[pad].flags &
1439 						MEDIA_PAD_FL_SOURCE)
1440 				break;
1441 
1442 		if (pad == q->sensor->entity.num_pads) {
1443 			dev_err(&cio2->pci_dev->dev,
1444 				"failed to find src pad for %s\n",
1445 				q->sensor->name);
1446 			return -ENXIO;
1447 		}
1448 
1449 		ret = media_create_pad_link(
1450 				&q->sensor->entity, pad,
1451 				&q->subdev.entity, CIO2_PAD_SINK,
1452 				0);
1453 		if (ret) {
1454 			dev_err(&cio2->pci_dev->dev,
1455 				"failed to create link for %s\n",
1456 				q->sensor->name);
1457 			return ret;
1458 		}
1459 	}
1460 
1461 	return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
1462 }
1463 
1464 static const struct v4l2_async_notifier_operations cio2_async_ops = {
1465 	.bound = cio2_notifier_bound,
1466 	.unbind = cio2_notifier_unbind,
1467 	.complete = cio2_notifier_complete,
1468 };
1469 
1470 static int cio2_parse_firmware(struct cio2_device *cio2)
1471 {
1472 	unsigned int i;
1473 	int ret;
1474 
1475 	for (i = 0; i < CIO2_NUM_PORTS; i++) {
1476 		struct v4l2_fwnode_endpoint vep = {
1477 			.bus_type = V4L2_MBUS_CSI2_DPHY
1478 		};
1479 		struct sensor_async_subdev *s_asd;
1480 		struct v4l2_async_subdev *asd;
1481 		struct fwnode_handle *ep;
1482 
1483 		ep = fwnode_graph_get_endpoint_by_id(
1484 			dev_fwnode(&cio2->pci_dev->dev), i, 0,
1485 			FWNODE_GRAPH_ENDPOINT_NEXT);
1486 
1487 		if (!ep)
1488 			continue;
1489 
1490 		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
1491 		if (ret)
1492 			goto err_parse;
1493 
1494 		asd = v4l2_async_notifier_add_fwnode_remote_subdev(
1495 				&cio2->notifier, ep, sizeof(*s_asd));
1496 		if (IS_ERR(asd)) {
1497 			ret = PTR_ERR(asd);
1498 			goto err_parse;
1499 		}
1500 
1501 		s_asd = container_of(asd, struct sensor_async_subdev, asd);
1502 		s_asd->csi2.port = vep.base.port;
1503 		s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
1504 
1505 		fwnode_handle_put(ep);
1506 
1507 		continue;
1508 
1509 err_parse:
1510 		fwnode_handle_put(ep);
1511 		return ret;
1512 	}
1513 
1514 	/*
1515 	 * Proceed even without sensors connected to allow the device to
1516 	 * suspend.
1517 	 */
1518 	cio2->notifier.ops = &cio2_async_ops;
1519 	ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
1520 	if (ret)
1521 		dev_err(&cio2->pci_dev->dev,
1522 			"failed to register async notifier: %d\n", ret);
1523 
1524 	return ret;
1525 }
1526 
1527 /**************** Queue initialization ****************/
1528 static const struct media_entity_operations cio2_media_ops = {
1529 	.link_validate = v4l2_subdev_link_validate,
1530 };
1531 
1532 static const struct media_entity_operations cio2_video_entity_ops = {
1533 	.link_validate = cio2_video_link_validate,
1534 };
1535 
1536 static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
1537 {
1538 	static const u32 default_width = 1936;
1539 	static const u32 default_height = 1096;
1540 	const struct ipu3_cio2_fmt dflt_fmt = formats[0];
1541 
1542 	struct video_device *vdev = &q->vdev;
1543 	struct vb2_queue *vbq = &q->vbq;
1544 	struct v4l2_subdev *subdev = &q->subdev;
1545 	struct v4l2_mbus_framefmt *fmt;
1546 	int r;
1547 
1548 	/* Initialize miscellaneous variables */
1549 	mutex_init(&q->lock);
1550 	mutex_init(&q->subdev_lock);
1551 
1552 	/* Initialize formats to default values */
1553 	fmt = &q->subdev_fmt;
1554 	fmt->width = default_width;
1555 	fmt->height = default_height;
1556 	fmt->code = dflt_fmt.mbus_code;
1557 	fmt->field = V4L2_FIELD_NONE;
1558 
1559 	q->format.width = default_width;
1560 	q->format.height = default_height;
1561 	q->format.pixelformat = dflt_fmt.fourcc;
1562 	q->format.colorspace = V4L2_COLORSPACE_RAW;
1563 	q->format.field = V4L2_FIELD_NONE;
1564 	q->format.num_planes = 1;
1565 	q->format.plane_fmt[0].bytesperline =
1566 				cio2_bytesperline(q->format.width);
1567 	q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
1568 						q->format.height;
1569 
1570 	/* Initialize fbpt */
1571 	r = cio2_fbpt_init(cio2, q);
1572 	if (r)
1573 		goto fail_fbpt;
1574 
1575 	/* Initialize media entities */
1576 	q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
1577 		MEDIA_PAD_FL_MUST_CONNECT;
1578 	q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
1579 	subdev->entity.ops = &cio2_media_ops;
1580 	subdev->internal_ops = &cio2_subdev_internal_ops;
1581 	r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
1582 	if (r) {
1583 		dev_err(&cio2->pci_dev->dev,
1584 			"failed to initialize subdev media entity (%d)\n", r);
1585 		goto fail_subdev_media_entity;
1586 	}
1587 
1588 	q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
1589 	vdev->entity.ops = &cio2_video_entity_ops;
1590 	r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
1591 	if (r) {
1592 		dev_err(&cio2->pci_dev->dev,
1593 			"failed to initialize videodev media entity (%d)\n", r);
1594 		goto fail_vdev_media_entity;
1595 	}
1596 
1597 	/* Initialize subdev */
1598 	v4l2_subdev_init(subdev, &cio2_subdev_ops);
1599 	subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
1600 	subdev->owner = THIS_MODULE;
1601 	snprintf(subdev->name, sizeof(subdev->name),
1602 		 CIO2_ENTITY_NAME " %td", q - cio2->queue);
1603 	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
1604 	v4l2_set_subdevdata(subdev, cio2);
1605 	r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
1606 	if (r) {
1607 		dev_err(&cio2->pci_dev->dev,
1608 			"failed to initialize subdev (%d)\n", r);
1609 		goto fail_subdev;
1610 	}
1611 
1612 	/* Initialize vbq */
1613 	vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1614 	vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
1615 	vbq->ops = &cio2_vb2_ops;
1616 	vbq->mem_ops = &vb2_dma_sg_memops;
1617 	vbq->buf_struct_size = sizeof(struct cio2_buffer);
1618 	vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
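	/* Allow streaming to start once a single buffer has been queued. */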
1619 	vbq->min_buffers_needed = 1;
1620 	vbq->drv_priv = cio2;
1621 	vbq->lock = &q->lock;
1622 	r = vb2_queue_init(vbq);
1623 	if (r) {
1624 		dev_err(&cio2->pci_dev->dev,
1625 			"failed to initialize videobuf2 queue (%d)\n", r);
1626 		goto fail_subdev;
1627 	}
1628 
1629 	/* Initialize vdev */
1630 	snprintf(vdev->name, sizeof(vdev->name),
1631 		 "%s %td", CIO2_NAME, q - cio2->queue);
1632 	vdev->release = video_device_release_empty;
1633 	vdev->fops = &cio2_v4l2_fops;
1634 	vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
1635 	vdev->lock = &cio2->lock;
1636 	vdev->v4l2_dev = &cio2->v4l2_dev;
1637 	vdev->queue = &q->vbq;
1638 	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
1639 	video_set_drvdata(vdev, cio2);
1640 	r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1641 	if (r) {
1642 		dev_err(&cio2->pci_dev->dev,
1643 			"failed to register video device (%d)\n", r);
1644 		goto fail_vdev;
1645 	}
1646 
1647 	/* Create link from CIO2 subdev to output node */
1648 	r = media_create_pad_link(
1649 		&subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
1650 		MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1651 	if (r)
1652 		goto fail_link;
1653 
1654 	return 0;
1655 
1656 fail_link:
1657 	vb2_video_unregister_device(&q->vdev);
1658 fail_vdev:
1659 	v4l2_device_unregister_subdev(subdev);
1660 fail_subdev:
1661 	media_entity_cleanup(&vdev->entity);
1662 fail_vdev_media_entity:
1663 	media_entity_cleanup(&subdev->entity);
1664 fail_subdev_media_entity:
1665 	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1666 fail_fbpt:
1667 	mutex_destroy(&q->subdev_lock);
1668 	mutex_destroy(&q->lock);
1669 
1670 	return r;
1671 }
1672 
1673 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
1674 {
1675 	vb2_video_unregister_device(&q->vdev);
1676 	media_entity_cleanup(&q->vdev.entity);
1677 	v4l2_device_unregister_subdev(&q->subdev);
1678 	media_entity_cleanup(&q->subdev.entity);
1679 	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1680 	mutex_destroy(&q->subdev_lock);
1681 	mutex_destroy(&q->lock);
1682 }
1683 
1684 static int cio2_queues_init(struct cio2_device *cio2)
1685 {
1686 	int i, r;
1687 
1688 	for (i = 0; i < CIO2_QUEUES; i++) {
1689 		r = cio2_queue_init(cio2, &cio2->queue[i]);
1690 		if (r)
1691 			break;
1692 	}
1693 
1694 	if (i == CIO2_QUEUES)
1695 		return 0;
1696 
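	/* Unwind the queues that were initialized before the failure. */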
1697 	for (i--; i >= 0; i--)
1698 		cio2_queue_exit(cio2, &cio2->queue[i]);
1699 
1700 	return r;
1701 }
1702 
1703 static void cio2_queues_exit(struct cio2_device *cio2)
1704 {
1705 	unsigned int i;
1706 
1707 	for (i = 0; i < CIO2_QUEUES; i++)
1708 		cio2_queue_exit(cio2, &cio2->queue[i]);
1709 }
1710 
1711 /**************** PCI interface ****************/
1712 
1713 static int cio2_pci_probe(struct pci_dev *pci_dev,
1714 			  const struct pci_device_id *id)
1715 {
1716 	struct cio2_device *cio2;
1717 	int r;
1718 
1719 	cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
1720 	if (!cio2)
1721 		return -ENOMEM;
1722 	cio2->pci_dev = pci_dev;
1723 
1724 	r = pcim_enable_device(pci_dev);
1725 	if (r) {
1726 		dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
1727 		return r;
1728 	}
1729 
1730 	dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
1731 		 pci_dev->device, pci_dev->revision);
1732 
1733 	r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
1734 	if (r) {
1735 		dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
1736 		return -ENODEV;
1737 	}
1738 
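	/* Keep a pointer to the mapped MMIO BAR for register access. */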
1739 	cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
1740 
1741 	pci_set_drvdata(pci_dev, cio2);
1742 
1743 	pci_set_master(pci_dev);
1744 
1745 	r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK);
1746 	if (r) {
1747 		dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
1748 		return -ENODEV;
1749 	}
1750 
1751 	r = pci_enable_msi(pci_dev);
1752 	if (r) {
1753 		dev_err(&pci_dev->dev, "failed to enable MSI (%d)\n", r);
1754 		return r;
1755 	}
1756 
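	/* Set up the dummy page and LOP used when no real buffer is available. */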
1757 	r = cio2_fbpt_init_dummy(cio2);
1758 	if (r)
1759 		return r;
1760 
1761 	mutex_init(&cio2->lock);
1762 
1763 	cio2->media_dev.dev = &cio2->pci_dev->dev;
1764 	strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
1765 		sizeof(cio2->media_dev.model));
1766 	snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
1767 		 "PCI:%s", pci_name(cio2->pci_dev));
1768 	cio2->media_dev.hw_revision = 0;
1769 
1770 	media_device_init(&cio2->media_dev);
1771 	r = media_device_register(&cio2->media_dev);
1772 	if (r < 0)
1773 		goto fail_mutex_destroy;
1774 
1775 	cio2->v4l2_dev.mdev = &cio2->media_dev;
1776 	r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev);
1777 	if (r) {
1778 		dev_err(&pci_dev->dev,
1779 			"failed to register V4L2 device (%d)\n", r);
1780 		goto fail_media_device_unregister;
1781 	}
1782 
1783 	r = cio2_queues_init(cio2);
1784 	if (r)
1785 		goto fail_v4l2_device_unregister;
1786 
1787 	v4l2_async_notifier_init(&cio2->notifier);
1788 
1789 	/* Register the async notifier for the subdevices we care about */
1790 	r = cio2_parse_firmware(cio2);
1791 	if (r)
1792 		goto fail_clean_notifier;
1793 
1794 	r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
1795 			     IRQF_SHARED, CIO2_NAME, cio2);
1796 	if (r) {
1797 		dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
1798 		goto fail_clean_notifier;
1799 	}
1800 
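	/* Hand control of the device over to runtime PM. */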
1801 	pm_runtime_put_noidle(&pci_dev->dev);
1802 	pm_runtime_allow(&pci_dev->dev);
1803 
1804 	return 0;
1805 
1806 fail_clean_notifier:
1807 	v4l2_async_notifier_unregister(&cio2->notifier);
1808 	v4l2_async_notifier_cleanup(&cio2->notifier);
1809 	cio2_queues_exit(cio2);
1810 fail_v4l2_device_unregister:
1811 	v4l2_device_unregister(&cio2->v4l2_dev);
1812 fail_media_device_unregister:
1813 	media_device_unregister(&cio2->media_dev);
1814 	media_device_cleanup(&cio2->media_dev);
1815 fail_mutex_destroy:
1816 	mutex_destroy(&cio2->lock);
1817 	cio2_fbpt_exit_dummy(cio2);
1818 
1819 	return r;
1820 }
1821 
1822 static void cio2_pci_remove(struct pci_dev *pci_dev)
1823 {
1824 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1825 
1826 	media_device_unregister(&cio2->media_dev);
1827 	v4l2_async_notifier_unregister(&cio2->notifier);
1828 	v4l2_async_notifier_cleanup(&cio2->notifier);
1829 	cio2_queues_exit(cio2);
1830 	cio2_fbpt_exit_dummy(cio2);
1831 	v4l2_device_unregister(&cio2->v4l2_dev);
1832 	media_device_cleanup(&cio2->media_dev);
1833 	mutex_destroy(&cio2->lock);
1834 
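	/* Undo the runtime PM handover done at probe time. */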
1835 	pm_runtime_forbid(&pci_dev->dev);
1836 	pm_runtime_get_noresume(&pci_dev->dev);
1837 }
1838 
1839 static int __maybe_unused cio2_runtime_suspend(struct device *dev)
1840 {
1841 	struct pci_dev *pci_dev = to_pci_dev(dev);
1842 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1843 	void __iomem *const base = cio2->base;
1844 	u16 pm;
1845 
1846 	writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
1847 	dev_dbg(dev, "cio2 runtime suspend.\n");
1848 
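	/* Clear the PowerState bits in PMCSR and request D3. */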
1849 	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1850 	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1851 	pm |= CIO2_PMCSR_D3;
1852 	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1853 
1854 	return 0;
1855 }
1856 
1857 static int __maybe_unused cio2_runtime_resume(struct device *dev)
1858 {
1859 	struct pci_dev *pci_dev = to_pci_dev(dev);
1860 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1861 	void __iomem *const base = cio2->base;
1862 	u16 pm;
1863 
1864 	writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
1865 	dev_dbg(dev, "cio2 runtime resume.\n");
1866 
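	/* Clear the PowerState bits in PMCSR to return the device to D0. */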
1867 	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1868 	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1869 	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1870 
1871 	return 0;
1872 }
1873 
1874 /*
1875  * Helper function to rotate a circular buffer so that the element at
1876  * index "start" becomes the first element.
1877  */
1878 static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
1879 {
1880 	struct {
1881 		size_t begin, end;
1882 	} arr[2] = {
1883 		{ 0, start - 1 },
1884 		{ start, elems - 1 },
1885 	};
1886 
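	/*
	 * The caller guarantees 0 < start < elems, so both chunks are
	 * initially non-empty.
	 */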
1887 #define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
1888 
1889 	/* Loop as long as we have out-of-place entries */
1890 	while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
1891 		size_t size0, i;
1892 
1893 		/*
1894 		 * Find the number of entries that can be arranged on this
1895 		 * iteration.
1896 		 */
1897 		size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
1898 
1899 		/* Swap the entries in two parts of the array. */
1900 		for (i = 0; i < size0; i++) {
1901 			u8 *d = ptr + elem_size * (arr[1].begin + i);
1902 			u8 *s = ptr + elem_size * (arr[0].begin + i);
1903 			size_t j;
1904 
1905 			for (j = 0; j < elem_size; j++)
1906 				swap(d[j], s[j]);
1907 		}
1908 
1909 		if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
1910 			/* The end of the first array remains unarranged. */
1911 			arr[0].begin += size0;
1912 		} else {
1913 			/*
1914 			 * The first array is fully arranged so we proceed
1915 			 * handling the next one.
1916 			 */
1917 			arr[0].begin = arr[1].begin;
1918 			arr[0].end = arr[1].begin + size0 - 1;
1919 			arr[1].begin += size0;
1920 		}
1921 	}
1922 }
1923 
1924 static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
1925 {
1926 	unsigned int i, j;
1927 
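	/* Find the first queued buffer, scanning forward from bufs_first. */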
1928 	for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
1929 		i++, j = (j + 1) % CIO2_MAX_BUFFERS)
1930 		if (q->bufs[j])
1931 			break;
1932 
1933 	if (i == CIO2_MAX_BUFFERS)
1934 		return;
1935 
1936 	if (j) {
1937 		arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
1938 			CIO2_MAX_BUFFERS, j);
1939 		arrange(q->bufs, sizeof(struct cio2_buffer *),
1940 			CIO2_MAX_BUFFERS, j);
1941 	}
1942 
1943 	/*
1944 	 * DMA clears the valid bit when accessing the buffer.
1945 	 * When the stream is stopped in the suspend callback, some buffers
1946 	 * may be left in an invalid state. After resume, when DMA reaches an
1947 	 * invalid buffer, it halts and stops receiving new data.
1948 	 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
1949 	 */
1950 	for (i = 0; i < CIO2_MAX_BUFFERS; i++)
1951 		cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
1952 }
1953 
1954 static int __maybe_unused cio2_suspend(struct device *dev)
1955 {
1956 	struct pci_dev *pci_dev = to_pci_dev(dev);
1957 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1958 	struct cio2_queue *q = cio2->cur_queue;
1959 
1960 	dev_dbg(dev, "cio2 suspend\n");
1961 	if (!cio2->streaming)
1962 		return 0;
1963 
1964 	/* Stop stream */
1965 	cio2_hw_exit(cio2, q);
1966 	synchronize_irq(pci_dev->irq);
1967 
1968 	pm_runtime_force_suspend(dev);
1969 
1970 	/*
1971 	 * Upon resume, the hardware starts processing the FBPT entries from the
1972 	 * beginning, so relocate the queued buffers to the FBPT head before suspending.
1973 	 */
1974 	cio2_fbpt_rearrange(cio2, q);
1975 	q->bufs_first = 0;
1976 	q->bufs_next = 0;
1977 
1978 	return 0;
1979 }
1980 
1981 static int __maybe_unused cio2_resume(struct device *dev)
1982 {
1983 	struct cio2_device *cio2 = dev_get_drvdata(dev);
1984 	struct cio2_queue *q = cio2->cur_queue;
1985 	int r;
1986 
1987 	dev_dbg(dev, "cio2 resume\n");
1988 	if (!cio2->streaming)
1989 		return 0;
1990 	/* Start stream */
1991 	r = pm_runtime_force_resume(&cio2->pci_dev->dev);
1992 	if (r < 0) {
1993 		dev_err(&cio2->pci_dev->dev,
1994 			"failed to set power state (%d)\n", r);
1995 		return r;
1996 	}
1997 
1998 	r = cio2_hw_init(cio2, q);
1999 	if (r)
2000 		dev_err(dev, "failed to initialize cio2 hw\n");
2001 
2002 	return r;
2003 }
2004 
2005 static const struct dev_pm_ops cio2_pm_ops = {
2006 	SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
2007 	SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
2008 };
2009 
2010 static const struct pci_device_id cio2_pci_id_table[] = {
2011 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
2012 	{ }
2013 };
2014 
2015 MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
2016 
2017 static struct pci_driver cio2_pci_driver = {
2018 	.name = CIO2_NAME,
2019 	.id_table = cio2_pci_id_table,
2020 	.probe = cio2_pci_probe,
2021 	.remove = cio2_pci_remove,
2022 	.driver = {
2023 		.pm = &cio2_pm_ops,
2024 	},
2025 };
2026 
2027 module_pci_driver(cio2_pci_driver);
2028 
2029 MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
2030 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
2031 MODULE_AUTHOR("Jian Xu Zheng");
2032 MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
2033 MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
2034 MODULE_LICENSE("GPL v2");
2035 MODULE_DESCRIPTION("IPU3 CIO2 driver");
2036