1 /*
2 * isp.c
3 *
4 * TI OMAP3 ISP - Core
5 *
6 * Copyright (C) 2006-2010 Nokia Corporation
7 * Copyright (C) 2007-2009 Texas Instruments, Inc.
8 *
9 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
10 * Sakari Ailus <sakari.ailus@iki.fi>
11 *
12 * Contributors:
13 * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
14 * Sakari Ailus <sakari.ailus@iki.fi>
15 * David Cohen <dacohen@gmail.com>
16 * Stanimir Varbanov <svarbanov@mm-sol.com>
17 * Vimarsh Zutshi <vimarsh.zutshi@gmail.com>
18 * Tuukka Toivonen <tuukkat76@gmail.com>
19 * Sergio Aguirre <saaguirre@ti.com>
20 * Antti Koskipaa <akoskipa@gmail.com>
21 * Ivan T. Ivanov <iivanov@mm-sol.com>
22 * RaniSuneela <r-m@ti.com>
23 * Atanas Filipov <afilipov@mm-sol.com>
24 * Gjorgji Rosikopulos <grosikopulos@mm-sol.com>
25 * Hiroshi DOYU <hiroshi.doyu@nokia.com>
26 * Nayden Kanchev <nkanchev@mm-sol.com>
27 * Phil Carmody <ext-phil.2.carmody@nokia.com>
28 * Artem Bityutskiy <artem.bityutskiy@nokia.com>
29 * Dominic Curran <dcurran@ti.com>
30 * Ilkka Myllyperkio <ilkka.myllyperkio@sofica.fi>
31 * Pallavi Kulkarni <p-kulkarni@ti.com>
32 * Vaibhav Hiremath <hvaibhav@ti.com>
33 * Mohit Jalori <mjalori@ti.com>
34 * Sameer Venkatraman <sameerv@ti.com>
35 * Senthilvadivu Guruswamy <svadivu@ti.com>
36 * Thara Gopinath <thara@ti.com>
37 * Toni Leinonen <toni.leinonen@nokia.com>
38 * Troy Laramy <t-laramy@ti.com>
39 *
40 * This program is free software; you can redistribute it and/or modify
41 * it under the terms of the GNU General Public License version 2 as
42 * published by the Free Software Foundation.
43 */
44
45 #include <asm/cacheflush.h>
46
47 #include <linux/clk.h>
48 #include <linux/clkdev.h>
49 #include <linux/delay.h>
50 #include <linux/device.h>
51 #include <linux/dma-mapping.h>
52 #include <linux/i2c.h>
53 #include <linux/interrupt.h>
54 #include <linux/mfd/syscon.h>
55 #include <linux/module.h>
56 #include <linux/omap-iommu.h>
57 #include <linux/platform_device.h>
58 #include <linux/regulator/consumer.h>
59 #include <linux/slab.h>
60 #include <linux/sched.h>
61 #include <linux/vmalloc.h>
62
63 #include <asm/dma-iommu.h>
64
65 #include <media/v4l2-common.h>
66 #include <media/v4l2-device.h>
67 #include <media/v4l2-of.h>
68
69 #include "isp.h"
70 #include "ispreg.h"
71 #include "ispccdc.h"
72 #include "isppreview.h"
73 #include "ispresizer.h"
74 #include "ispcsi2.h"
75 #include "ispccp2.h"
76 #include "isph3a.h"
77 #include "isphist.h"
78
79 static unsigned int autoidle;
80 module_param(autoidle, int, 0444);
81 MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support");
82
83 static void isp_save_ctx(struct isp_device *isp);
84
85 static void isp_restore_ctx(struct isp_device *isp);
86
87 static const struct isp_res_mapping isp_res_maps[] = {
88 {
89 .isp_rev = ISP_REVISION_2_0,
90 .offset = {
91 /* first MMIO area */
92 0x0000, /* base, len 0x0070 */
93 0x0400, /* ccp2, len 0x01f0 */
94 0x0600, /* ccdc, len 0x00a8 */
95 0x0a00, /* hist, len 0x0048 */
96 0x0c00, /* h3a, len 0x0060 */
97 0x0e00, /* preview, len 0x00a0 */
98 0x1000, /* resizer, len 0x00ac */
99 0x1200, /* sbl, len 0x00fc */
100 /* second MMIO area */
101 0x0000, /* csi2a, len 0x0170 */
102 0x0170, /* csiphy2, len 0x000c */
103 },
104 .phy_type = ISP_PHY_TYPE_3430,
105 },
106 {
107 .isp_rev = ISP_REVISION_15_0,
108 .offset = {
109 /* first MMIO area */
110 0x0000, /* base, len 0x0070 */
111 0x0400, /* ccp2, len 0x01f0 */
112 0x0600, /* ccdc, len 0x00a8 */
113 0x0a00, /* hist, len 0x0048 */
114 0x0c00, /* h3a, len 0x0060 */
115 0x0e00, /* preview, len 0x00a0 */
116 0x1000, /* resizer, len 0x00ac */
117 0x1200, /* sbl, len 0x00fc */
118 /* second MMIO area */
119 0x0000, /* csi2a, len 0x0170 (1st area) */
120 0x0170, /* csiphy2, len 0x000c */
121 0x01c0, /* csi2a, len 0x0040 (2nd area) */
122 0x0400, /* csi2c, len 0x0170 (1st area) */
123 0x0570, /* csiphy1, len 0x000c */
124 0x05c0, /* csi2c, len 0x0040 (2nd area) */
125 },
126 .phy_type = ISP_PHY_TYPE_3630,
127 },
128 };
129
130 /* Structure for saving/restoring ISP module registers */
131 static struct isp_reg isp_reg_list[] = {
132 {OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG, 0},
133 {OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, 0},
134 {OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, 0},
135 {0, ISP_TOK_TERM, 0}
136 };
137
138 /*
139 * omap3isp_flush - Post pending L3 bus writes by doing a register readback
140 * @isp: OMAP3 ISP device
141 *
142 * In order to force posting of pending writes, we need to write and
143 * readback the same register, in this case the revision register.
144 *
145 * See this link for reference:
146 * http://www.mail-archive.com/linux-omap@vger.kernel.org/msg08149.html
147 */
void omap3isp_flush(struct isp_device *isp)
149 {
150 isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
151 isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
152 }
153
154 /* -----------------------------------------------------------------------------
155 * XCLK
156 */
157
158 #define to_isp_xclk(_hw) container_of(_hw, struct isp_xclk, hw)
159
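/*
 * isp_xclk_update - Program the divider for one of the external clocks
 * @xclk: The xclk A or B instance
 * @divider: Divider value to program (0 turns the clock output off)
 *
 * Write @divider to the DIVA or DIVB field of the ISP_TCTRL_CTRL register,
 * depending on the clock ID.
 */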
static void isp_xclk_update(struct isp_xclk *xclk, u32 divider)
161 {
162 switch (xclk->id) {
163 case ISP_XCLK_A:
164 isp_reg_clr_set(xclk->isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
165 ISPTCTRL_CTRL_DIVA_MASK,
166 divider << ISPTCTRL_CTRL_DIVA_SHIFT);
167 break;
168 case ISP_XCLK_B:
169 isp_reg_clr_set(xclk->isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
170 ISPTCTRL_CTRL_DIVB_MASK,
171 divider << ISPTCTRL_CTRL_DIVB_SHIFT);
172 break;
173 }
174 }
175
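/*
 * The prepare and unprepare operations take and release a reference on the
 * ISP through omap3isp_get() and omap3isp_put(), so that the ISP stays
 * powered while an external clock is prepared.
 */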
static int isp_xclk_prepare(struct clk_hw *hw)
177 {
178 struct isp_xclk *xclk = to_isp_xclk(hw);
179
180 omap3isp_get(xclk->isp);
181
182 return 0;
183 }
184
static void isp_xclk_unprepare(struct clk_hw *hw)
186 {
187 struct isp_xclk *xclk = to_isp_xclk(hw);
188
189 omap3isp_put(xclk->isp);
190 }
191
static int isp_xclk_enable(struct clk_hw *hw)
193 {
194 struct isp_xclk *xclk = to_isp_xclk(hw);
195 unsigned long flags;
196
197 spin_lock_irqsave(&xclk->lock, flags);
198 isp_xclk_update(xclk, xclk->divider);
199 xclk->enabled = true;
200 spin_unlock_irqrestore(&xclk->lock, flags);
201
202 return 0;
203 }
204
static void isp_xclk_disable(struct clk_hw *hw)
206 {
207 struct isp_xclk *xclk = to_isp_xclk(hw);
208 unsigned long flags;
209
210 spin_lock_irqsave(&xclk->lock, flags);
211 isp_xclk_update(xclk, 0);
212 xclk->enabled = false;
213 spin_unlock_irqrestore(&xclk->lock, flags);
214 }
215
static unsigned long isp_xclk_recalc_rate(struct clk_hw *hw,
217 unsigned long parent_rate)
218 {
219 struct isp_xclk *xclk = to_isp_xclk(hw);
220
221 return parent_rate / xclk->divider;
222 }
223
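/*
 * isp_xclk_calc_divider - Compute the xclk divider for a requested rate
 * @rate: Requested clock rate, updated with the achievable rate
 * @parent_rate: Rate of the cam_mclk parent clock
 *
 * Return the divider to program, or ISPTCTRL_CTRL_DIV_BYPASS when the
 * requested rate is greater than or equal to the parent rate.
 */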
static u32 isp_xclk_calc_divider(unsigned long *rate, unsigned long parent_rate)
225 {
226 u32 divider;
227
228 if (*rate >= parent_rate) {
229 *rate = parent_rate;
230 return ISPTCTRL_CTRL_DIV_BYPASS;
231 }
232
233 if (*rate == 0)
234 *rate = 1;
235
236 divider = DIV_ROUND_CLOSEST(parent_rate, *rate);
237 if (divider >= ISPTCTRL_CTRL_DIV_BYPASS)
238 divider = ISPTCTRL_CTRL_DIV_BYPASS - 1;
239
240 *rate = parent_rate / divider;
241 return divider;
242 }
243
static long isp_xclk_round_rate(struct clk_hw *hw, unsigned long rate,
245 unsigned long *parent_rate)
246 {
247 isp_xclk_calc_divider(&rate, *parent_rate);
248 return rate;
249 }
250
static int isp_xclk_set_rate(struct clk_hw *hw, unsigned long rate,
252 unsigned long parent_rate)
253 {
254 struct isp_xclk *xclk = to_isp_xclk(hw);
255 unsigned long flags;
256 u32 divider;
257
258 divider = isp_xclk_calc_divider(&rate, parent_rate);
259
260 spin_lock_irqsave(&xclk->lock, flags);
261
262 xclk->divider = divider;
263 if (xclk->enabled)
264 isp_xclk_update(xclk, divider);
265
266 spin_unlock_irqrestore(&xclk->lock, flags);
267
268 dev_dbg(xclk->isp->dev, "%s: cam_xclk%c set to %lu Hz (div %u)\n",
269 __func__, xclk->id == ISP_XCLK_A ? 'a' : 'b', rate, divider);
270 return 0;
271 }
272
273 static const struct clk_ops isp_xclk_ops = {
274 .prepare = isp_xclk_prepare,
275 .unprepare = isp_xclk_unprepare,
276 .enable = isp_xclk_enable,
277 .disable = isp_xclk_disable,
278 .recalc_rate = isp_xclk_recalc_rate,
279 .round_rate = isp_xclk_round_rate,
280 .set_rate = isp_xclk_set_rate,
281 };
282
283 static const char *isp_xclk_parent_name = "cam_mclk";
284
285 static const struct clk_init_data isp_xclk_init_data = {
286 .name = "cam_xclk",
287 .ops = &isp_xclk_ops,
288 .parent_names = &isp_xclk_parent_name,
289 .num_parents = 1,
290 };
291
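/*
 * isp_xclk_src_get - OF clock provider callback
 *
 * Translate a clock specifier into the corresponding cam_xclka or cam_xclkb
 * clock registered by isp_xclk_init().
 */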
static struct clk *isp_xclk_src_get(struct of_phandle_args *clkspec, void *data)
293 {
294 unsigned int idx = clkspec->args[0];
295 struct isp_device *isp = data;
296
297 if (idx >= ARRAY_SIZE(isp->xclks))
298 return ERR_PTR(-ENOENT);
299
300 return isp->xclks[idx].clk;
301 }
302
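/*
 * isp_xclk_init - Register the cam_xclka and cam_xclkb external clocks
 * @isp: OMAP3 ISP device
 *
 * Register both external clock outputs with the common clock framework and,
 * when a device tree node is available, expose them through an OF clock
 * provider.
 */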
static int isp_xclk_init(struct isp_device *isp)
304 {
305 struct device_node *np = isp->dev->of_node;
306 struct clk_init_data init = { 0 };
307 unsigned int i;
308
309 for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)
310 isp->xclks[i].clk = ERR_PTR(-EINVAL);
311
312 for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) {
313 struct isp_xclk *xclk = &isp->xclks[i];
314
315 xclk->isp = isp;
316 xclk->id = i == 0 ? ISP_XCLK_A : ISP_XCLK_B;
317 xclk->divider = 1;
318 spin_lock_init(&xclk->lock);
319
320 init.name = i == 0 ? "cam_xclka" : "cam_xclkb";
321 init.ops = &isp_xclk_ops;
322 init.parent_names = &isp_xclk_parent_name;
323 init.num_parents = 1;
324
325 xclk->hw.init = &init;
326 /*
327 * The first argument is NULL in order to avoid circular
328 * reference, as this driver takes reference on the
329 * sensor subdevice modules and the sensors would take
330 * reference on this module through clk_get().
331 */
332 xclk->clk = clk_register(NULL, &xclk->hw);
333 if (IS_ERR(xclk->clk))
334 return PTR_ERR(xclk->clk);
335 }
336
337 if (np)
338 of_clk_add_provider(np, isp_xclk_src_get, isp);
339
340 return 0;
341 }
342
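/*
 * isp_xclk_cleanup - Unregister the external clocks and the OF clock provider
 * @isp: OMAP3 ISP device
 */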
static void isp_xclk_cleanup(struct isp_device *isp)
344 {
345 struct device_node *np = isp->dev->of_node;
346 unsigned int i;
347
348 if (np)
349 of_clk_del_provider(np);
350
351 for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) {
352 struct isp_xclk *xclk = &isp->xclks[i];
353
354 if (!IS_ERR(xclk->clk))
355 clk_unregister(xclk->clk);
356 }
357 }
358
359 /* -----------------------------------------------------------------------------
360 * Interrupts
361 */
362
363 /*
364 * isp_enable_interrupts - Enable ISP interrupts.
365 * @isp: OMAP3 ISP device
366 */
static void isp_enable_interrupts(struct isp_device *isp)
368 {
369 static const u32 irq = IRQ0ENABLE_CSIA_IRQ
370 | IRQ0ENABLE_CSIB_IRQ
371 | IRQ0ENABLE_CCDC_LSC_PREF_ERR_IRQ
372 | IRQ0ENABLE_CCDC_LSC_DONE_IRQ
373 | IRQ0ENABLE_CCDC_VD0_IRQ
374 | IRQ0ENABLE_CCDC_VD1_IRQ
375 | IRQ0ENABLE_HS_VS_IRQ
376 | IRQ0ENABLE_HIST_DONE_IRQ
377 | IRQ0ENABLE_H3A_AWB_DONE_IRQ
378 | IRQ0ENABLE_H3A_AF_DONE_IRQ
379 | IRQ0ENABLE_PRV_DONE_IRQ
380 | IRQ0ENABLE_RSZ_DONE_IRQ;
381
382 isp_reg_writel(isp, irq, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
383 isp_reg_writel(isp, irq, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE);
384 }
385
386 /*
387 * isp_disable_interrupts - Disable ISP interrupts.
388 * @isp: OMAP3 ISP device
389 */
static void isp_disable_interrupts(struct isp_device *isp)
391 {
392 isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE);
393 }
394
395 /*
396 * isp_core_init - ISP core settings
397 * @isp: OMAP3 ISP device
398 * @idle: Consider idle state.
399 *
400 * Set the power settings for the ISP and SBL bus and configure the HS/VS
401 * interrupt source.
402 *
403 * We need to configure the HS/VS interrupt source before interrupts get
404 * enabled, as the sensor might be free-running and the ISP default setting
405 * (HS edge) would put an unnecessary burden on the CPU.
406 */
static void isp_core_init(struct isp_device *isp, int idle)
408 {
409 isp_reg_writel(isp,
410 ((idle ? ISP_SYSCONFIG_MIDLEMODE_SMARTSTANDBY :
411 ISP_SYSCONFIG_MIDLEMODE_FORCESTANDBY) <<
412 ISP_SYSCONFIG_MIDLEMODE_SHIFT) |
413 ((isp->revision == ISP_REVISION_15_0) ?
414 ISP_SYSCONFIG_AUTOIDLE : 0),
415 OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG);
416
417 isp_reg_writel(isp,
418 (isp->autoidle ? ISPCTRL_SBL_AUTOIDLE : 0) |
419 ISPCTRL_SYNC_DETECT_VSRISE,
420 OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
421 }
422
423 /*
424 * Configure the bridge and lane shifter. Valid inputs are
425 *
426 * CCDC_INPUT_PARALLEL: Parallel interface
427 * CCDC_INPUT_CSI2A: CSI2a receiver
428 * CCDC_INPUT_CCP2B: CCP2b receiver
429 * CCDC_INPUT_CSI2C: CSI2c receiver
430 *
431 * The bridge and lane shifter are configured according to the selected input
432 * and the ISP platform data.
433 */
void omap3isp_configure_bridge(struct isp_device *isp,
435 enum ccdc_input_entity input,
436 const struct isp_parallel_cfg *parcfg,
437 unsigned int shift, unsigned int bridge)
438 {
439 u32 ispctrl_val;
440
441 ispctrl_val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
442 ispctrl_val &= ~ISPCTRL_SHIFT_MASK;
443 ispctrl_val &= ~ISPCTRL_PAR_CLK_POL_INV;
444 ispctrl_val &= ~ISPCTRL_PAR_SER_CLK_SEL_MASK;
445 ispctrl_val &= ~ISPCTRL_PAR_BRIDGE_MASK;
446 ispctrl_val |= bridge;
447
448 switch (input) {
449 case CCDC_INPUT_PARALLEL:
450 ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL;
451 ispctrl_val |= parcfg->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT;
452 shift += parcfg->data_lane_shift * 2;
453 break;
454
455 case CCDC_INPUT_CSI2A:
456 ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIA;
457 break;
458
459 case CCDC_INPUT_CCP2B:
460 ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIB;
461 break;
462
463 case CCDC_INPUT_CSI2C:
464 ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIC;
465 break;
466
467 default:
468 return;
469 }
470
471 ispctrl_val |= ((shift/2) << ISPCTRL_SHIFT_SHIFT) & ISPCTRL_SHIFT_MASK;
472
473 isp_reg_writel(isp, ispctrl_val, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
474 }
475
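/*
 * omap3isp_hist_dma_done - Histogram DMA transfer completion handler
 * @isp: OMAP3 ISP device
 *
 * If the CCDC or the histogram module is busy when the DMA transfer completes,
 * the histogram has lost synchronization with the CCDC and the next buffer
 * must be ignored.
 */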
void omap3isp_hist_dma_done(struct isp_device *isp)
477 {
478 if (omap3isp_ccdc_busy(&isp->isp_ccdc) ||
479 omap3isp_stat_pcr_busy(&isp->isp_hist)) {
480 /* Histogram cannot be enabled in this frame anymore */
481 atomic_set(&isp->isp_hist.buf_err, 1);
482 dev_dbg(isp->dev, "hist: Out of synchronization with "
483 "CCDC. Ignoring next buffer.\n");
484 }
485 }
486
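/* Print the names of the asserted IRQ0 status bits for debugging purposes. */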
static inline void isp_isr_dbg(struct isp_device *isp, u32 irqstatus)
488 {
489 static const char *name[] = {
490 "CSIA_IRQ",
491 "res1",
492 "res2",
493 "CSIB_LCM_IRQ",
494 "CSIB_IRQ",
495 "res5",
496 "res6",
497 "res7",
498 "CCDC_VD0_IRQ",
499 "CCDC_VD1_IRQ",
500 "CCDC_VD2_IRQ",
501 "CCDC_ERR_IRQ",
502 "H3A_AF_DONE_IRQ",
503 "H3A_AWB_DONE_IRQ",
504 "res14",
505 "res15",
506 "HIST_DONE_IRQ",
507 "CCDC_LSC_DONE",
508 "CCDC_LSC_PREFETCH_COMPLETED",
509 "CCDC_LSC_PREFETCH_ERROR",
510 "PRV_DONE_IRQ",
511 "CBUFF_IRQ",
512 "res22",
513 "res23",
514 "RSZ_DONE_IRQ",
515 "OVF_IRQ",
516 "res26",
517 "res27",
518 "MMU_ERR_IRQ",
519 "OCP_ERR_IRQ",
520 "SEC_ERR_IRQ",
521 "HS_VS_IRQ",
522 };
523 int i;
524
525 dev_dbg(isp->dev, "ISP IRQ: ");
526
527 for (i = 0; i < ARRAY_SIZE(name); i++) {
528 if ((1 << i) & irqstatus)
529 printk(KERN_CONT "%s ", name[i]);
530 }
531 printk(KERN_CONT "\n");
532 }
533
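/*
 * isp_isr_sbl - Shared buffer logic interrupt handler
 * @isp: OMAP3 ISP device
 *
 * Check and clear the SBL overflow status bits, flag the affected pipelines
 * as erroneous and report overflows to the AF and AEWB statistics modules.
 */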
static void isp_isr_sbl(struct isp_device *isp)
535 {
536 struct device *dev = isp->dev;
537 struct isp_pipeline *pipe;
538 u32 sbl_pcr;
539
540 /*
541 * Handle shared buffer logic overflows for video buffers.
542 * ISPSBL_PCR_CCDCPRV_2_RSZ_OVF can be safely ignored.
543 */
544 sbl_pcr = isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR);
545 isp_reg_writel(isp, sbl_pcr, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR);
546 sbl_pcr &= ~ISPSBL_PCR_CCDCPRV_2_RSZ_OVF;
547
548 if (sbl_pcr)
549 dev_dbg(dev, "SBL overflow (PCR = 0x%08x)\n", sbl_pcr);
550
551 if (sbl_pcr & ISPSBL_PCR_CSIB_WBL_OVF) {
552 pipe = to_isp_pipeline(&isp->isp_ccp2.subdev.entity);
553 if (pipe != NULL)
554 pipe->error = true;
555 }
556
557 if (sbl_pcr & ISPSBL_PCR_CSIA_WBL_OVF) {
558 pipe = to_isp_pipeline(&isp->isp_csi2a.subdev.entity);
559 if (pipe != NULL)
560 pipe->error = true;
561 }
562
563 if (sbl_pcr & ISPSBL_PCR_CCDC_WBL_OVF) {
564 pipe = to_isp_pipeline(&isp->isp_ccdc.subdev.entity);
565 if (pipe != NULL)
566 pipe->error = true;
567 }
568
569 if (sbl_pcr & ISPSBL_PCR_PRV_WBL_OVF) {
570 pipe = to_isp_pipeline(&isp->isp_prev.subdev.entity);
571 if (pipe != NULL)
572 pipe->error = true;
573 }
574
575 if (sbl_pcr & (ISPSBL_PCR_RSZ1_WBL_OVF
576 | ISPSBL_PCR_RSZ2_WBL_OVF
577 | ISPSBL_PCR_RSZ3_WBL_OVF
578 | ISPSBL_PCR_RSZ4_WBL_OVF)) {
579 pipe = to_isp_pipeline(&isp->isp_res.subdev.entity);
580 if (pipe != NULL)
581 pipe->error = true;
582 }
583
584 if (sbl_pcr & ISPSBL_PCR_H3A_AF_WBL_OVF)
585 omap3isp_stat_sbl_overflow(&isp->isp_af);
586
587 if (sbl_pcr & ISPSBL_PCR_H3A_AEAWB_WBL_OVF)
588 omap3isp_stat_sbl_overflow(&isp->isp_aewb);
589 }
590
591 /*
592 * isp_isr - Interrupt Service Routine for Camera ISP module.
593 * @irq: Not used currently.
594 * @_isp: Pointer to the OMAP3 ISP device
595 *
 * Dispatches the interrupt to the ISP submodules whose status bits are set.
597 */
static irqreturn_t isp_isr(int irq, void *_isp)
599 {
600 static const u32 ccdc_events = IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ |
601 IRQ0STATUS_CCDC_LSC_DONE_IRQ |
602 IRQ0STATUS_CCDC_VD0_IRQ |
603 IRQ0STATUS_CCDC_VD1_IRQ |
604 IRQ0STATUS_HS_VS_IRQ;
605 struct isp_device *isp = _isp;
606 u32 irqstatus;
607
608 irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
609 isp_reg_writel(isp, irqstatus, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
610
611 isp_isr_sbl(isp);
612
613 if (irqstatus & IRQ0STATUS_CSIA_IRQ)
614 omap3isp_csi2_isr(&isp->isp_csi2a);
615
616 if (irqstatus & IRQ0STATUS_CSIB_IRQ)
617 omap3isp_ccp2_isr(&isp->isp_ccp2);
618
619 if (irqstatus & IRQ0STATUS_CCDC_VD0_IRQ) {
620 if (isp->isp_ccdc.output & CCDC_OUTPUT_PREVIEW)
621 omap3isp_preview_isr_frame_sync(&isp->isp_prev);
622 if (isp->isp_ccdc.output & CCDC_OUTPUT_RESIZER)
623 omap3isp_resizer_isr_frame_sync(&isp->isp_res);
624 omap3isp_stat_isr_frame_sync(&isp->isp_aewb);
625 omap3isp_stat_isr_frame_sync(&isp->isp_af);
626 omap3isp_stat_isr_frame_sync(&isp->isp_hist);
627 }
628
629 if (irqstatus & ccdc_events)
630 omap3isp_ccdc_isr(&isp->isp_ccdc, irqstatus & ccdc_events);
631
632 if (irqstatus & IRQ0STATUS_PRV_DONE_IRQ) {
633 if (isp->isp_prev.output & PREVIEW_OUTPUT_RESIZER)
634 omap3isp_resizer_isr_frame_sync(&isp->isp_res);
635 omap3isp_preview_isr(&isp->isp_prev);
636 }
637
638 if (irqstatus & IRQ0STATUS_RSZ_DONE_IRQ)
639 omap3isp_resizer_isr(&isp->isp_res);
640
641 if (irqstatus & IRQ0STATUS_H3A_AWB_DONE_IRQ)
642 omap3isp_stat_isr(&isp->isp_aewb);
643
644 if (irqstatus & IRQ0STATUS_H3A_AF_DONE_IRQ)
645 omap3isp_stat_isr(&isp->isp_af);
646
647 if (irqstatus & IRQ0STATUS_HIST_DONE_IRQ)
648 omap3isp_stat_isr(&isp->isp_hist);
649
650 omap3isp_flush(isp);
651
652 #if defined(DEBUG) && defined(ISP_ISR_DEBUG)
653 isp_isr_dbg(isp, irqstatus);
654 #endif
655
656 return IRQ_HANDLED;
657 }
658
659 /* -----------------------------------------------------------------------------
660 * Pipeline power management
661 *
662 * Entities must be powered up when part of a pipeline that contains at least
663 * one open video device node.
664 *
665 * To achieve this use the entity use_count field to track the number of users.
666 * For entities corresponding to video device nodes the use_count field stores
667 * the users count of the node. For entities corresponding to subdevs the
668 * use_count field stores the total number of users of all video device nodes
669 * in the pipeline.
670 *
671 * The omap3isp_pipeline_pm_use() function must be called in the open() and
672 * close() handlers of video device nodes. It increments or decrements the use
673 * count of all subdev entities in the pipeline.
674 *
675 * To react to link management on powered pipelines, the link setup notification
676 * callback updates the use count of all entities in the source and sink sides
677 * of the link.
678 */
679
680 /*
681 * isp_pipeline_pm_use_count - Count the number of users of a pipeline
682 * @entity: The entity
683 *
684 * Return the total number of users of all video device nodes in the pipeline.
685 */
static int isp_pipeline_pm_use_count(struct media_entity *entity)
687 {
688 struct media_entity_graph graph;
689 int use = 0;
690
691 media_entity_graph_walk_start(&graph, entity);
692
693 while ((entity = media_entity_graph_walk_next(&graph))) {
694 if (media_entity_type(entity) == MEDIA_ENT_T_DEVNODE)
695 use += entity->use_count;
696 }
697
698 return use;
699 }
700
701 /*
702 * isp_pipeline_pm_power_one - Apply power change to an entity
703 * @entity: The entity
704 * @change: Use count change
705 *
706 * Change the entity use count by @change. If the entity is a subdev update its
707 * power state by calling the core::s_power operation when the use count goes
708 * from 0 to != 0 or from != 0 to 0.
709 *
710 * Return 0 on success or a negative error code on failure.
711 */
static int isp_pipeline_pm_power_one(struct media_entity *entity, int change)
713 {
714 struct v4l2_subdev *subdev;
715 int ret;
716
717 subdev = media_entity_type(entity) == MEDIA_ENT_T_V4L2_SUBDEV
718 ? media_entity_to_v4l2_subdev(entity) : NULL;
719
720 if (entity->use_count == 0 && change > 0 && subdev != NULL) {
721 ret = v4l2_subdev_call(subdev, core, s_power, 1);
722 if (ret < 0 && ret != -ENOIOCTLCMD)
723 return ret;
724 }
725
726 entity->use_count += change;
727 WARN_ON(entity->use_count < 0);
728
729 if (entity->use_count == 0 && change < 0 && subdev != NULL)
730 v4l2_subdev_call(subdev, core, s_power, 0);
731
732 return 0;
733 }
734
735 /*
736 * isp_pipeline_pm_power - Apply power change to all entities in a pipeline
737 * @entity: The entity
738 * @change: Use count change
739 *
740 * Walk the pipeline to update the use count and the power state of all non-node
741 * entities.
742 *
743 * Return 0 on success or a negative error code on failure.
744 */
static int isp_pipeline_pm_power(struct media_entity *entity, int change)
746 {
747 struct media_entity_graph graph;
748 struct media_entity *first = entity;
749 int ret = 0;
750
751 if (!change)
752 return 0;
753
754 media_entity_graph_walk_start(&graph, entity);
755
756 while (!ret && (entity = media_entity_graph_walk_next(&graph)))
757 if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
758 ret = isp_pipeline_pm_power_one(entity, change);
759
760 if (!ret)
761 return 0;
762
763 media_entity_graph_walk_start(&graph, first);
764
765 while ((first = media_entity_graph_walk_next(&graph))
766 && first != entity)
767 if (media_entity_type(first) != MEDIA_ENT_T_DEVNODE)
768 isp_pipeline_pm_power_one(first, -change);
769
770 return ret;
771 }
772
773 /*
774 * omap3isp_pipeline_pm_use - Update the use count of an entity
775 * @entity: The entity
776 * @use: Use (1) or stop using (0) the entity
777 *
778 * Update the use count of all entities in the pipeline and power entities on or
779 * off accordingly.
780 *
781 * Return 0 on success or a negative error code on failure. Powering entities
782 * off is assumed to never fail. No failure can occur when the use parameter is
783 * set to 0.
784 */
int omap3isp_pipeline_pm_use(struct media_entity *entity, int use)
786 {
787 int change = use ? 1 : -1;
788 int ret;
789
790 mutex_lock(&entity->parent->graph_mutex);
791
792 /* Apply use count to node. */
793 entity->use_count += change;
794 WARN_ON(entity->use_count < 0);
795
796 /* Apply power change to connected non-nodes. */
797 ret = isp_pipeline_pm_power(entity, change);
798 if (ret < 0)
799 entity->use_count -= change;
800
801 mutex_unlock(&entity->parent->graph_mutex);
802
803 return ret;
804 }
805
806 /*
807 * isp_pipeline_link_notify - Link management notification callback
808 * @link: The link
809 * @flags: New link flags that will be applied
810 * @notification: The link's state change notification type (MEDIA_DEV_NOTIFY_*)
811 *
812 * React to link management on powered pipelines by updating the use count of
813 * all entities in the source and sink sides of the link. Entities are powered
814 * on or off accordingly.
815 *
816 * Return 0 on success or a negative error code on failure. Powering entities
817 * off is assumed to never fail. This function will not fail for disconnection
818 * events.
819 */
static int isp_pipeline_link_notify(struct media_link *link, u32 flags,
821 unsigned int notification)
822 {
823 struct media_entity *source = link->source->entity;
824 struct media_entity *sink = link->sink->entity;
825 int source_use = isp_pipeline_pm_use_count(source);
826 int sink_use = isp_pipeline_pm_use_count(sink);
827 int ret;
828
829 if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
830 !(flags & MEDIA_LNK_FL_ENABLED)) {
831 /* Powering off entities is assumed to never fail. */
832 isp_pipeline_pm_power(source, -sink_use);
833 isp_pipeline_pm_power(sink, -source_use);
834 return 0;
835 }
836
837 if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
838 (flags & MEDIA_LNK_FL_ENABLED)) {
839
840 ret = isp_pipeline_pm_power(source, sink_use);
841 if (ret < 0)
842 return ret;
843
844 ret = isp_pipeline_pm_power(sink, source_use);
845 if (ret < 0)
846 isp_pipeline_pm_power(source, -sink_use);
847
848 return ret;
849 }
850
851 return 0;
852 }
853
854 /* -----------------------------------------------------------------------------
855 * Pipeline stream management
856 */
857
858 /*
859 * isp_pipeline_enable - Enable streaming on a pipeline
860 * @pipe: ISP pipeline
861 * @mode: Stream mode (single shot or continuous)
862 *
863 * Walk the entities chain starting at the pipeline output video node and start
864 * all modules in the chain in the given mode.
865 *
866 * Return 0 if successful, or the return value of the failed video::s_stream
867 * operation otherwise.
868 */
static int isp_pipeline_enable(struct isp_pipeline *pipe,
870 enum isp_pipeline_stream_state mode)
871 {
872 struct isp_device *isp = pipe->output->isp;
873 struct media_entity *entity;
874 struct media_pad *pad;
875 struct v4l2_subdev *subdev;
876 unsigned long flags;
877 int ret;
878
879 /* Refuse to start streaming if an entity included in the pipeline has
880 * crashed. This check must be performed before the loop below to avoid
881 * starting entities if the pipeline won't start anyway (those entities
882 * would then likely fail to stop, making the problem worse).
883 */
884 if (pipe->entities & isp->crashed)
885 return -EIO;
886
887 spin_lock_irqsave(&pipe->lock, flags);
888 pipe->state &= ~(ISP_PIPELINE_IDLE_INPUT | ISP_PIPELINE_IDLE_OUTPUT);
889 spin_unlock_irqrestore(&pipe->lock, flags);
890
891 pipe->do_propagation = false;
892
893 entity = &pipe->output->video.entity;
894 while (1) {
895 pad = &entity->pads[0];
896 if (!(pad->flags & MEDIA_PAD_FL_SINK))
897 break;
898
899 pad = media_entity_remote_pad(pad);
900 if (pad == NULL ||
901 media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
902 break;
903
904 entity = pad->entity;
905 subdev = media_entity_to_v4l2_subdev(entity);
906
907 ret = v4l2_subdev_call(subdev, video, s_stream, mode);
908 if (ret < 0 && ret != -ENOIOCTLCMD)
909 return ret;
910
911 if (subdev == &isp->isp_ccdc.subdev) {
912 v4l2_subdev_call(&isp->isp_aewb.subdev, video,
913 s_stream, mode);
914 v4l2_subdev_call(&isp->isp_af.subdev, video,
915 s_stream, mode);
916 v4l2_subdev_call(&isp->isp_hist.subdev, video,
917 s_stream, mode);
918 pipe->do_propagation = true;
919 }
920
921 /* Stop at the first external sub-device. */
922 if (subdev->dev != isp->dev)
923 break;
924 }
925
926 return 0;
927 }
928
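/*
 * Module busy checks used by isp_pipeline_disable() through
 * isp_pipeline_wait() when stopping a pipeline.
 */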
static int isp_pipeline_wait_resizer(struct isp_device *isp)
930 {
931 return omap3isp_resizer_busy(&isp->isp_res);
932 }
933
static int isp_pipeline_wait_preview(struct isp_device *isp)
935 {
936 return omap3isp_preview_busy(&isp->isp_prev);
937 }
938
static int isp_pipeline_wait_ccdc(struct isp_device *isp)
940 {
941 return omap3isp_stat_busy(&isp->isp_af)
942 || omap3isp_stat_busy(&isp->isp_aewb)
943 || omap3isp_stat_busy(&isp->isp_hist)
944 || omap3isp_ccdc_busy(&isp->isp_ccdc);
945 }
946
947 #define ISP_STOP_TIMEOUT msecs_to_jiffies(1000)
948
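/*
 * isp_pipeline_wait - Wait for a module to become idle
 * @isp: OMAP3 ISP device
 * @busy: Function returning a non-zero value as long as the module is busy
 *
 * Return 0 when the module becomes idle, or 1 if it is still busy after
 * ISP_STOP_TIMEOUT has elapsed.
 */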
static int isp_pipeline_wait(struct isp_device *isp,
950 int(*busy)(struct isp_device *isp))
951 {
952 unsigned long timeout = jiffies + ISP_STOP_TIMEOUT;
953
954 while (!time_after(jiffies, timeout)) {
955 if (!busy(isp))
956 return 0;
957 }
958
959 return 1;
960 }
961
962 /*
963 * isp_pipeline_disable - Disable streaming on a pipeline
964 * @pipe: ISP pipeline
965 *
966 * Walk the entities chain starting at the pipeline output video node and stop
967 * all modules in the chain. Wait synchronously for the modules to be stopped if
968 * necessary.
969 *
970 * Return 0 if all modules have been properly stopped, or -ETIMEDOUT if a module
971 * can't be stopped (in which case a software reset of the ISP is probably
972 * necessary).
973 */
static int isp_pipeline_disable(struct isp_pipeline *pipe)
975 {
976 struct isp_device *isp = pipe->output->isp;
977 struct media_entity *entity;
978 struct media_pad *pad;
979 struct v4l2_subdev *subdev;
980 int failure = 0;
981 int ret;
982
983 /*
984 * We need to stop all the modules after CCDC first or they'll
985 * never stop since they may not get a full frame from CCDC.
986 */
987 entity = &pipe->output->video.entity;
988 while (1) {
989 pad = &entity->pads[0];
990 if (!(pad->flags & MEDIA_PAD_FL_SINK))
991 break;
992
993 pad = media_entity_remote_pad(pad);
994 if (pad == NULL ||
995 media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
996 break;
997
998 entity = pad->entity;
999 subdev = media_entity_to_v4l2_subdev(entity);
1000
1001 if (subdev == &isp->isp_ccdc.subdev) {
1002 v4l2_subdev_call(&isp->isp_aewb.subdev,
1003 video, s_stream, 0);
1004 v4l2_subdev_call(&isp->isp_af.subdev,
1005 video, s_stream, 0);
1006 v4l2_subdev_call(&isp->isp_hist.subdev,
1007 video, s_stream, 0);
1008 }
1009
1010 ret = v4l2_subdev_call(subdev, video, s_stream, 0);
1011
1012 if (subdev == &isp->isp_res.subdev)
1013 ret |= isp_pipeline_wait(isp, isp_pipeline_wait_resizer);
1014 else if (subdev == &isp->isp_prev.subdev)
1015 ret |= isp_pipeline_wait(isp, isp_pipeline_wait_preview);
1016 else if (subdev == &isp->isp_ccdc.subdev)
1017 ret |= isp_pipeline_wait(isp, isp_pipeline_wait_ccdc);
1018
1019 /* Handle stop failures. An entity that fails to stop can
1020 * usually just be restarted. Flag the stop failure nonetheless
1021 * to trigger an ISP reset the next time the device is released,
1022 * just in case.
1023 *
1024 * The preview engine is a special case. A failure to stop can
1025 * mean a hardware crash. When that happens the preview engine
1026 * won't respond to read/write operations on the L4 bus anymore,
1027 * resulting in a bus fault and a kernel oops next time it gets
1028 * accessed. Mark it as crashed to prevent pipelines including
1029 * it from being started.
1030 */
1031 if (ret) {
1032 dev_info(isp->dev, "Unable to stop %s\n", subdev->name);
1033 isp->stop_failure = true;
1034 if (subdev == &isp->isp_prev.subdev)
1035 isp->crashed |= 1U << subdev->entity.id;
1036 failure = -ETIMEDOUT;
1037 }
1038
1039 /* Stop at the first external sub-device. */
1040 if (subdev->dev != isp->dev)
1041 break;
1042 }
1043
1044 return failure;
1045 }
1046
1047 /*
1048 * omap3isp_pipeline_set_stream - Enable/disable streaming on a pipeline
1049 * @pipe: ISP pipeline
1050 * @state: Stream state (stopped, single shot or continuous)
1051 *
1052 * Set the pipeline to the given stream state. Pipelines can be started in
1053 * single-shot or continuous mode.
1054 *
1055 * Return 0 if successful, or the return value of the failed video::s_stream
1056 * operation otherwise. The pipeline state is not updated when the operation
1057 * fails, except when stopping the pipeline.
1058 */
int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
1060 enum isp_pipeline_stream_state state)
1061 {
1062 int ret;
1063
1064 if (state == ISP_PIPELINE_STREAM_STOPPED)
1065 ret = isp_pipeline_disable(pipe);
1066 else
1067 ret = isp_pipeline_enable(pipe, state);
1068
1069 if (ret == 0 || state == ISP_PIPELINE_STREAM_STOPPED)
1070 pipe->stream_state = state;
1071
1072 return ret;
1073 }
1074
1075 /*
1076 * omap3isp_pipeline_cancel_stream - Cancel stream on a pipeline
1077 * @pipe: ISP pipeline
1078 *
 * Cancelling a stream marks all buffers on all video nodes in the pipeline as
1080 * erroneous and makes sure no new buffer can be queued. This function is called
1081 * when a fatal error that prevents any further operation on the pipeline
1082 * occurs.
1083 */
void omap3isp_pipeline_cancel_stream(struct isp_pipeline *pipe)
1085 {
1086 if (pipe->input)
1087 omap3isp_video_cancel_stream(pipe->input);
1088 if (pipe->output)
1089 omap3isp_video_cancel_stream(pipe->output);
1090 }
1091
1092 /*
1093 * isp_pipeline_resume - Resume streaming on a pipeline
1094 * @pipe: ISP pipeline
1095 *
1096 * Resume video output and input and re-enable pipeline.
1097 */
static void isp_pipeline_resume(struct isp_pipeline *pipe)
1099 {
1100 int singleshot = pipe->stream_state == ISP_PIPELINE_STREAM_SINGLESHOT;
1101
1102 omap3isp_video_resume(pipe->output, !singleshot);
1103 if (singleshot)
1104 omap3isp_video_resume(pipe->input, 0);
1105 isp_pipeline_enable(pipe, pipe->stream_state);
1106 }
1107
1108 /*
1109 * isp_pipeline_suspend - Suspend streaming on a pipeline
1110 * @pipe: ISP pipeline
1111 *
1112 * Suspend pipeline.
1113 */
static void isp_pipeline_suspend(struct isp_pipeline *pipe)
1115 {
1116 isp_pipeline_disable(pipe);
1117 }
1118
1119 /*
1120 * isp_pipeline_is_last - Verify if entity has an enabled link to the output
1121 * video node
1122 * @me: ISP module's media entity
1123 *
 * Returns 1 if the entity has an enabled link to the output video node, or 0
 * otherwise. This is valid only as long as a pipeline can have no more than
 * one output node.
1127 */
static int isp_pipeline_is_last(struct media_entity *me)
1129 {
1130 struct isp_pipeline *pipe;
1131 struct media_pad *pad;
1132
1133 if (!me->pipe)
1134 return 0;
1135 pipe = to_isp_pipeline(me);
1136 if (pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED)
1137 return 0;
1138 pad = media_entity_remote_pad(&pipe->output->pad);
1139 return pad->entity == me;
1140 }
1141
1142 /*
 * isp_suspend_module_pipeline - Suspend the pipeline to which a module belongs
1144 * @me: ISP module's media entity
1145 *
 * Suspend the whole pipeline if the module's entity has an enabled link to the
 * output video node. This works only as long as a pipeline can have no more
 * than one output node.
1149 */
static void isp_suspend_module_pipeline(struct media_entity *me)
1151 {
1152 if (isp_pipeline_is_last(me))
1153 isp_pipeline_suspend(to_isp_pipeline(me));
1154 }
1155
1156 /*
 * isp_resume_module_pipeline - Resume the pipeline to which a module belongs
1158 * @me: ISP module's media entity
1159 *
 * Resume the whole pipeline if the module's entity has an enabled link to the
 * output video node. This works only as long as a pipeline can have no more
 * than one output node.
1163 */
static void isp_resume_module_pipeline(struct media_entity *me)
1165 {
1166 if (isp_pipeline_is_last(me))
1167 isp_pipeline_resume(to_isp_pipeline(me));
1168 }
1169
1170 /*
1171 * isp_suspend_modules - Suspend ISP submodules.
1172 * @isp: OMAP3 ISP device
1173 *
 * Returns 0 if suspend left all the submodules properly idle, or 1 if a
 * general reset is required to suspend the submodules.
1176 */
static int isp_suspend_modules(struct isp_device *isp)
1178 {
1179 unsigned long timeout;
1180
1181 omap3isp_stat_suspend(&isp->isp_aewb);
1182 omap3isp_stat_suspend(&isp->isp_af);
1183 omap3isp_stat_suspend(&isp->isp_hist);
1184 isp_suspend_module_pipeline(&isp->isp_res.subdev.entity);
1185 isp_suspend_module_pipeline(&isp->isp_prev.subdev.entity);
1186 isp_suspend_module_pipeline(&isp->isp_ccdc.subdev.entity);
1187 isp_suspend_module_pipeline(&isp->isp_csi2a.subdev.entity);
1188 isp_suspend_module_pipeline(&isp->isp_ccp2.subdev.entity);
1189
1190 timeout = jiffies + ISP_STOP_TIMEOUT;
1191 while (omap3isp_stat_busy(&isp->isp_af)
1192 || omap3isp_stat_busy(&isp->isp_aewb)
1193 || omap3isp_stat_busy(&isp->isp_hist)
1194 || omap3isp_preview_busy(&isp->isp_prev)
1195 || omap3isp_resizer_busy(&isp->isp_res)
1196 || omap3isp_ccdc_busy(&isp->isp_ccdc)) {
1197 if (time_after(jiffies, timeout)) {
1198 dev_info(isp->dev, "can't stop modules.\n");
1199 return 1;
1200 }
1201 msleep(1);
1202 }
1203
1204 return 0;
1205 }
1206
1207 /*
1208 * isp_resume_modules - Resume ISP submodules.
1209 * @isp: OMAP3 ISP device
1210 */
static void isp_resume_modules(struct isp_device *isp)
1212 {
1213 omap3isp_stat_resume(&isp->isp_aewb);
1214 omap3isp_stat_resume(&isp->isp_af);
1215 omap3isp_stat_resume(&isp->isp_hist);
1216 isp_resume_module_pipeline(&isp->isp_res.subdev.entity);
1217 isp_resume_module_pipeline(&isp->isp_prev.subdev.entity);
1218 isp_resume_module_pipeline(&isp->isp_ccdc.subdev.entity);
1219 isp_resume_module_pipeline(&isp->isp_csi2a.subdev.entity);
1220 isp_resume_module_pipeline(&isp->isp_ccp2.subdev.entity);
1221 }
1222
1223 /*
1224 * isp_reset - Reset ISP with a timeout wait for idle.
1225 * @isp: OMAP3 ISP device
1226 */
static int isp_reset(struct isp_device *isp)
1228 {
1229 unsigned long timeout = 0;
1230
1231 isp_reg_writel(isp,
1232 isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG)
1233 | ISP_SYSCONFIG_SOFTRESET,
1234 OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG);
1235 while (!(isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN,
1236 ISP_SYSSTATUS) & 0x1)) {
1237 if (timeout++ > 10000) {
1238 dev_alert(isp->dev, "cannot reset ISP\n");
1239 return -ETIMEDOUT;
1240 }
1241 udelay(1);
1242 }
1243
1244 isp->stop_failure = false;
1245 isp->crashed = 0;
1246 return 0;
1247 }
1248
1249 /*
1250 * isp_save_context - Saves the values of the ISP module registers.
1251 * @isp: OMAP3 ISP device
1252 * @reg_list: Structure containing pairs of register address and value to
1253 * modify on OMAP.
1254 */
1255 static void
isp_save_context(struct isp_device *isp, struct isp_reg *reg_list)
1257 {
1258 struct isp_reg *next = reg_list;
1259
1260 for (; next->reg != ISP_TOK_TERM; next++)
1261 next->val = isp_reg_readl(isp, next->mmio_range, next->reg);
1262 }
1263
1264 /*
1265 * isp_restore_context - Restores the values of the ISP module registers.
1266 * @isp: OMAP3 ISP device
1267 * @reg_list: Structure containing pairs of register address and value to
1268 * modify on OMAP.
1269 */
1270 static void
isp_restore_context(struct isp_device *isp, struct isp_reg *reg_list)
1272 {
1273 struct isp_reg *next = reg_list;
1274
1275 for (; next->reg != ISP_TOK_TERM; next++)
1276 isp_reg_writel(isp, next->val, next->mmio_range, next->reg);
1277 }
1278
1279 /*
1280 * isp_save_ctx - Saves ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context.
1281 * @isp: OMAP3 ISP device
1282 *
1283 * Routine for saving the context of each module in the ISP.
1284 * CCDC, HIST, H3A, PREV, RESZ and MMU.
1285 */
static void isp_save_ctx(struct isp_device *isp)
1287 {
1288 isp_save_context(isp, isp_reg_list);
1289 omap_iommu_save_ctx(isp->dev);
1290 }
1291
1292 /*
1293 * isp_restore_ctx - Restores ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context.
1294 * @isp: OMAP3 ISP device
1295 *
1296 * Routine for restoring the context of each module in the ISP.
1297 * CCDC, HIST, H3A, PREV, RESZ and MMU.
1298 */
static void isp_restore_ctx(struct isp_device *isp)
1300 {
1301 isp_restore_context(isp, isp_reg_list);
1302 omap_iommu_restore_ctx(isp->dev);
1303 omap3isp_ccdc_restore_context(isp);
1304 omap3isp_preview_restore_context(isp);
1305 }
1306
1307 /* -----------------------------------------------------------------------------
1308 * SBL resources management
1309 */
1310 #define OMAP3_ISP_SBL_READ (OMAP3_ISP_SBL_CSI1_READ | \
1311 OMAP3_ISP_SBL_CCDC_LSC_READ | \
1312 OMAP3_ISP_SBL_PREVIEW_READ | \
1313 OMAP3_ISP_SBL_RESIZER_READ)
1314 #define OMAP3_ISP_SBL_WRITE (OMAP3_ISP_SBL_CSI1_WRITE | \
1315 OMAP3_ISP_SBL_CSI2A_WRITE | \
1316 OMAP3_ISP_SBL_CSI2C_WRITE | \
1317 OMAP3_ISP_SBL_CCDC_WRITE | \
1318 OMAP3_ISP_SBL_PREVIEW_WRITE)
1319
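/*
 * omap3isp_sbl_enable - Acquire shared buffer logic resources
 * @isp: OMAP3 ISP device
 * @res: SBL resource(s) to acquire
 *
 * Record the new resource usage and enable the corresponding shared ports and
 * RAM accesses in the ISP_CTRL register.
 */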
void omap3isp_sbl_enable(struct isp_device *isp, enum isp_sbl_resource res)
1321 {
1322 u32 sbl = 0;
1323
1324 isp->sbl_resources |= res;
1325
1326 if (isp->sbl_resources & OMAP3_ISP_SBL_CSI1_READ)
1327 sbl |= ISPCTRL_SBL_SHARED_RPORTA;
1328
1329 if (isp->sbl_resources & OMAP3_ISP_SBL_CCDC_LSC_READ)
1330 sbl |= ISPCTRL_SBL_SHARED_RPORTB;
1331
1332 if (isp->sbl_resources & OMAP3_ISP_SBL_CSI2C_WRITE)
1333 sbl |= ISPCTRL_SBL_SHARED_WPORTC;
1334
1335 if (isp->sbl_resources & OMAP3_ISP_SBL_RESIZER_WRITE)
1336 sbl |= ISPCTRL_SBL_WR0_RAM_EN;
1337
1338 if (isp->sbl_resources & OMAP3_ISP_SBL_WRITE)
1339 sbl |= ISPCTRL_SBL_WR1_RAM_EN;
1340
1341 if (isp->sbl_resources & OMAP3_ISP_SBL_READ)
1342 sbl |= ISPCTRL_SBL_RD_RAM_EN;
1343
1344 isp_reg_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, sbl);
1345 }
1346
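/*
 * omap3isp_sbl_disable - Release shared buffer logic resources
 * @isp: OMAP3 ISP device
 * @res: SBL resource(s) to release
 *
 * Record the new resource usage and disable the shared ports and RAM accesses
 * that no longer have any user.
 */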
void omap3isp_sbl_disable(struct isp_device *isp, enum isp_sbl_resource res)
1348 {
1349 u32 sbl = 0;
1350
1351 isp->sbl_resources &= ~res;
1352
1353 if (!(isp->sbl_resources & OMAP3_ISP_SBL_CSI1_READ))
1354 sbl |= ISPCTRL_SBL_SHARED_RPORTA;
1355
1356 if (!(isp->sbl_resources & OMAP3_ISP_SBL_CCDC_LSC_READ))
1357 sbl |= ISPCTRL_SBL_SHARED_RPORTB;
1358
1359 if (!(isp->sbl_resources & OMAP3_ISP_SBL_CSI2C_WRITE))
1360 sbl |= ISPCTRL_SBL_SHARED_WPORTC;
1361
1362 if (!(isp->sbl_resources & OMAP3_ISP_SBL_RESIZER_WRITE))
1363 sbl |= ISPCTRL_SBL_WR0_RAM_EN;
1364
1365 if (!(isp->sbl_resources & OMAP3_ISP_SBL_WRITE))
1366 sbl |= ISPCTRL_SBL_WR1_RAM_EN;
1367
1368 if (!(isp->sbl_resources & OMAP3_ISP_SBL_READ))
1369 sbl |= ISPCTRL_SBL_RD_RAM_EN;
1370
1371 isp_reg_clr(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, sbl);
1372 }
1373
1374 /*
 * omap3isp_module_sync_idle - Helper to sync a module with its idle state
1376 * @me: ISP submodule's media entity
1377 * @wait: ISP submodule's wait queue for streamoff/interrupt synchronization
1378 * @stopping: flag which tells module wants to stop
1379 *
 * This function checks whether the ISP submodule needs to wait for the next
 * interrupt. If so, it makes the caller sleep until that event occurs.
1382 */
int omap3isp_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
1384 atomic_t *stopping)
1385 {
1386 struct isp_pipeline *pipe = to_isp_pipeline(me);
1387
1388 if (pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED ||
1389 (pipe->stream_state == ISP_PIPELINE_STREAM_SINGLESHOT &&
1390 !isp_pipeline_ready(pipe)))
1391 return 0;
1392
1393 /*
1394 * atomic_set() doesn't include memory barrier on ARM platform for SMP
1395 * scenario. We'll call it here to avoid race conditions.
1396 */
1397 atomic_set(stopping, 1);
1398 smp_mb();
1399
1400 /*
1401 * If module is the last one, it's writing to memory. In this case,
1402 * it's necessary to check if the module is already paused due to
1403 * DMA queue underrun or if it has to wait for next interrupt to be
1404 * idle.
1405 * If it isn't the last one, the function won't sleep but *stopping
1406 * will still be set to warn next submodule caller's interrupt the
1407 * module wants to be idle.
1408 */
1409 if (isp_pipeline_is_last(me)) {
1410 struct isp_video *video = pipe->output;
1411 unsigned long flags;
1412 spin_lock_irqsave(&video->irqlock, flags);
1413 if (video->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) {
1414 spin_unlock_irqrestore(&video->irqlock, flags);
1415 atomic_set(stopping, 0);
1416 smp_mb();
1417 return 0;
1418 }
1419 spin_unlock_irqrestore(&video->irqlock, flags);
1420 if (!wait_event_timeout(*wait, !atomic_read(stopping),
1421 msecs_to_jiffies(1000))) {
1422 atomic_set(stopping, 0);
1423 smp_mb();
1424 return -ETIMEDOUT;
1425 }
1426 }
1427
1428 return 0;
1429 }
1430
1431 /*
1432 * omap3isp_module_sync_is_stopping - Helper to verify if module was stopping
1433 * @wait: ISP submodule's wait queue for streamoff/interrupt synchronization
1434 * @stopping: flag which tells module wants to stop
1435 *
 * This function checks whether the ISP submodule was stopping. If so, it
 * notifies the caller by setting @stopping to 0 and waking up the wait queue.
 * Returns 1 if the module was stopping, or 0 otherwise.
1439 */
int omap3isp_module_sync_is_stopping(wait_queue_head_t *wait,
1441 atomic_t *stopping)
1442 {
1443 if (atomic_cmpxchg(stopping, 1, 0)) {
1444 wake_up(wait);
1445 return 1;
1446 }
1447
1448 return 0;
1449 }
1450
1451 /* --------------------------------------------------------------------------
1452 * Clock management
1453 */
1454
1455 #define ISPCTRL_CLKS_MASK (ISPCTRL_H3A_CLK_EN | \
1456 ISPCTRL_HIST_CLK_EN | \
1457 ISPCTRL_RSZ_CLK_EN | \
1458 (ISPCTRL_CCDC_CLK_EN | ISPCTRL_CCDC_RAM_EN) | \
1459 (ISPCTRL_PREV_CLK_EN | ISPCTRL_PREV_RAM_EN))
1460
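/*
 * __isp_subclk_update - Apply the sub-clock resource usage to ISP_CTRL
 * @isp: OMAP3 ISP device
 *
 * Enable the module clocks (and the internal RAMs for the CCDC and preview
 * engines) that have at least one user and disable all the others.
 */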
static void __isp_subclk_update(struct isp_device *isp)
1462 {
1463 u32 clk = 0;
1464
1465 /* AEWB and AF share the same clock. */
1466 if (isp->subclk_resources &
1467 (OMAP3_ISP_SUBCLK_AEWB | OMAP3_ISP_SUBCLK_AF))
1468 clk |= ISPCTRL_H3A_CLK_EN;
1469
1470 if (isp->subclk_resources & OMAP3_ISP_SUBCLK_HIST)
1471 clk |= ISPCTRL_HIST_CLK_EN;
1472
1473 if (isp->subclk_resources & OMAP3_ISP_SUBCLK_RESIZER)
1474 clk |= ISPCTRL_RSZ_CLK_EN;
1475
1476 /* NOTE: For CCDC & Preview submodules, we need to affect internal
1477 * RAM as well.
1478 */
1479 if (isp->subclk_resources & OMAP3_ISP_SUBCLK_CCDC)
1480 clk |= ISPCTRL_CCDC_CLK_EN | ISPCTRL_CCDC_RAM_EN;
1481
1482 if (isp->subclk_resources & OMAP3_ISP_SUBCLK_PREVIEW)
1483 clk |= ISPCTRL_PREV_CLK_EN | ISPCTRL_PREV_RAM_EN;
1484
1485 isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL,
1486 ISPCTRL_CLKS_MASK, clk);
1487 }
1488
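/*
 * omap3isp_subclk_enable - Enable an ISP sub-clock
 * @isp: OMAP3 ISP device
 * @res: Sub-clock resource to mark as used
 */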
void omap3isp_subclk_enable(struct isp_device *isp,
1490 enum isp_subclk_resource res)
1491 {
1492 isp->subclk_resources |= res;
1493
1494 __isp_subclk_update(isp);
1495 }
1496
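/*
 * omap3isp_subclk_disable - Disable an ISP sub-clock
 * @isp: OMAP3 ISP device
 * @res: Sub-clock resource to mark as unused
 */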
void omap3isp_subclk_disable(struct isp_device *isp,
1498 enum isp_subclk_resource res)
1499 {
1500 isp->subclk_resources &= ~res;
1501
1502 __isp_subclk_update(isp);
1503 }
1504
1505 /*
1506 * isp_enable_clocks - Enable ISP clocks
1507 * @isp: OMAP3 ISP device
1508 *
1509 * Return 0 if successful, or clk_prepare_enable return value if any of them
1510 * fails.
1511 */
static int isp_enable_clocks(struct isp_device *isp)
1513 {
1514 int r;
1515 unsigned long rate;
1516
1517 r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_ICK]);
1518 if (r) {
1519 dev_err(isp->dev, "failed to enable cam_ick clock\n");
1520 goto out_clk_enable_ick;
1521 }
1522 r = clk_set_rate(isp->clock[ISP_CLK_CAM_MCLK], CM_CAM_MCLK_HZ);
1523 if (r) {
1524 dev_err(isp->dev, "clk_set_rate for cam_mclk failed\n");
1525 goto out_clk_enable_mclk;
1526 }
1527 r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_MCLK]);
1528 if (r) {
1529 dev_err(isp->dev, "failed to enable cam_mclk clock\n");
1530 goto out_clk_enable_mclk;
1531 }
1532 rate = clk_get_rate(isp->clock[ISP_CLK_CAM_MCLK]);
1533 if (rate != CM_CAM_MCLK_HZ)
1534 dev_warn(isp->dev, "unexpected cam_mclk rate:\n"
1535 " expected : %d\n"
1536 " actual : %ld\n", CM_CAM_MCLK_HZ, rate);
1537 r = clk_prepare_enable(isp->clock[ISP_CLK_CSI2_FCK]);
1538 if (r) {
1539 dev_err(isp->dev, "failed to enable csi2_fck clock\n");
1540 goto out_clk_enable_csi2_fclk;
1541 }
1542 return 0;
1543
1544 out_clk_enable_csi2_fclk:
1545 clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
1546 out_clk_enable_mclk:
1547 clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
1548 out_clk_enable_ick:
1549 return r;
1550 }
1551
1552 /*
1553 * isp_disable_clocks - Disable ISP clocks
1554 * @isp: OMAP3 ISP device
1555 */
static void isp_disable_clocks(struct isp_device *isp)
1557 {
1558 clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
1559 clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
1560 clk_disable_unprepare(isp->clock[ISP_CLK_CSI2_FCK]);
1561 }
1562
1563 static const char *isp_clocks[] = {
1564 "cam_ick",
1565 "cam_mclk",
1566 "csi2_96m_fck",
1567 "l3_ick",
1568 };
1569
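/*
 * isp_get_clocks - Get all ISP functional and interface clocks
 * @isp: OMAP3 ISP device
 *
 * Return 0 if successful, or the devm_clk_get() error code on failure.
 */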
static int isp_get_clocks(struct isp_device *isp)
1571 {
1572 struct clk *clk;
1573 unsigned int i;
1574
1575 for (i = 0; i < ARRAY_SIZE(isp_clocks); ++i) {
1576 clk = devm_clk_get(isp->dev, isp_clocks[i]);
1577 if (IS_ERR(clk)) {
1578 dev_err(isp->dev, "clk_get %s failed\n", isp_clocks[i]);
1579 return PTR_ERR(clk);
1580 }
1581
1582 isp->clock[i] = clk;
1583 }
1584
1585 return 0;
1586 }
1587
1588 /*
1589 * omap3isp_get - Acquire the ISP resource.
1590 *
1591 * Initializes the clocks for the first acquire.
1592 *
1593 * Increment the reference count on the ISP. If the first reference is taken,
1594 * enable clocks and power-up all submodules.
1595 *
1596 * Return a pointer to the ISP device structure, or NULL if an error occurred.
1597 */
static struct isp_device *__omap3isp_get(struct isp_device *isp, bool irq)
1599 {
1600 struct isp_device *__isp = isp;
1601
1602 if (isp == NULL)
1603 return NULL;
1604
1605 mutex_lock(&isp->isp_mutex);
1606 if (isp->ref_count > 0)
1607 goto out;
1608
1609 if (isp_enable_clocks(isp) < 0) {
1610 __isp = NULL;
1611 goto out;
1612 }
1613
1614 /* We don't want to restore context before saving it! */
1615 if (isp->has_context)
1616 isp_restore_ctx(isp);
1617
1618 if (irq)
1619 isp_enable_interrupts(isp);
1620
1621 out:
1622 if (__isp != NULL)
1623 isp->ref_count++;
1624 mutex_unlock(&isp->isp_mutex);
1625
1626 return __isp;
1627 }
1628
struct isp_device *omap3isp_get(struct isp_device *isp)
1630 {
1631 return __omap3isp_get(isp, true);
1632 }
1633
1634 /*
1635 * omap3isp_put - Release the ISP
1636 *
1637 * Decrement the reference count on the ISP. If the last reference is released,
1638 * power-down all submodules, disable clocks and free temporary buffers.
1639 */
static void __omap3isp_put(struct isp_device *isp, bool save_ctx)
1641 {
1642 if (isp == NULL)
1643 return;
1644
1645 mutex_lock(&isp->isp_mutex);
1646 BUG_ON(isp->ref_count == 0);
1647 if (--isp->ref_count == 0) {
1648 isp_disable_interrupts(isp);
1649 if (save_ctx) {
1650 isp_save_ctx(isp);
1651 isp->has_context = 1;
1652 }
1653 /* Reset the ISP if an entity has failed to stop. This is the
1654 * only way to recover from such conditions.
1655 */
1656 if (isp->crashed || isp->stop_failure)
1657 isp_reset(isp);
1658 isp_disable_clocks(isp);
1659 }
1660 mutex_unlock(&isp->isp_mutex);
1661 }
1662
void omap3isp_put(struct isp_device *isp)
1664 {
1665 __omap3isp_put(isp, true);
1666 }
1667
1668 /* --------------------------------------------------------------------------
1669 * Platform device driver
1670 */
1671
1672 /*
1673 * omap3isp_print_status - Prints the values of the ISP Control Module registers
1674 * @isp: OMAP3 ISP device
1675 */
1676 #define ISP_PRINT_REGISTER(isp, name)\
1677 dev_dbg(isp->dev, "###ISP " #name "=0x%08x\n", \
1678 isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_##name))
1679 #define SBL_PRINT_REGISTER(isp, name)\
1680 dev_dbg(isp->dev, "###SBL " #name "=0x%08x\n", \
1681 isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_##name))
1682
void omap3isp_print_status(struct isp_device *isp)
1684 {
1685 dev_dbg(isp->dev, "-------------ISP Register dump--------------\n");
1686
1687 ISP_PRINT_REGISTER(isp, SYSCONFIG);
1688 ISP_PRINT_REGISTER(isp, SYSSTATUS);
1689 ISP_PRINT_REGISTER(isp, IRQ0ENABLE);
1690 ISP_PRINT_REGISTER(isp, IRQ0STATUS);
1691 ISP_PRINT_REGISTER(isp, TCTRL_GRESET_LENGTH);
1692 ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_REPLAY);
1693 ISP_PRINT_REGISTER(isp, CTRL);
1694 ISP_PRINT_REGISTER(isp, TCTRL_CTRL);
1695 ISP_PRINT_REGISTER(isp, TCTRL_FRAME);
1696 ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_DELAY);
1697 ISP_PRINT_REGISTER(isp, TCTRL_STRB_DELAY);
1698 ISP_PRINT_REGISTER(isp, TCTRL_SHUT_DELAY);
1699 ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_LENGTH);
1700 ISP_PRINT_REGISTER(isp, TCTRL_STRB_LENGTH);
1701 ISP_PRINT_REGISTER(isp, TCTRL_SHUT_LENGTH);
1702
1703 SBL_PRINT_REGISTER(isp, PCR);
1704 SBL_PRINT_REGISTER(isp, SDR_REQ_EXP);
1705
1706 dev_dbg(isp->dev, "--------------------------------------------\n");
1707 }
1708
1709 #ifdef CONFIG_PM
1710
1711 /*
1712 * Power management support.
1713 *
1714 * As the ISP can't properly handle an input video stream interruption on a non
1715 * frame boundary, the ISP pipelines need to be stopped before sensors get
1716 * suspended. However, as suspending the sensors can require a running clock,
1717 * which can be provided by the ISP, the ISP can't be completely suspended
1718 * before the sensor.
1719 *
1720 * To solve this problem power management support is split into prepare/complete
1721 * and suspend/resume operations. The pipelines are stopped in prepare() and the
1722 * ISP clocks get disabled in suspend(). Similarly, the clocks are reenabled in
 * resume(), and the pipelines are restarted in complete().
1724 *
1725 * TODO: PM dependencies between the ISP and sensors are not modelled explicitly
1726 * yet.
1727 */
static int isp_pm_prepare(struct device *dev)
1729 {
1730 struct isp_device *isp = dev_get_drvdata(dev);
1731 int reset;
1732
1733 WARN_ON(mutex_is_locked(&isp->isp_mutex));
1734
1735 if (isp->ref_count == 0)
1736 return 0;
1737
1738 reset = isp_suspend_modules(isp);
1739 isp_disable_interrupts(isp);
1740 isp_save_ctx(isp);
1741 if (reset)
1742 isp_reset(isp);
1743
1744 return 0;
1745 }
1746
static int isp_pm_suspend(struct device *dev)
1748 {
1749 struct isp_device *isp = dev_get_drvdata(dev);
1750
1751 WARN_ON(mutex_is_locked(&isp->isp_mutex));
1752
1753 if (isp->ref_count)
1754 isp_disable_clocks(isp);
1755
1756 return 0;
1757 }
1758
static int isp_pm_resume(struct device *dev)
1760 {
1761 struct isp_device *isp = dev_get_drvdata(dev);
1762
1763 if (isp->ref_count == 0)
1764 return 0;
1765
1766 return isp_enable_clocks(isp);
1767 }
1768
1769 static void isp_pm_complete(struct device *dev)
1770 {
1771 struct isp_device *isp = dev_get_drvdata(dev);
1772
1773 if (isp->ref_count == 0)
1774 return;
1775
1776 isp_restore_ctx(isp);
1777 isp_enable_interrupts(isp);
1778 isp_resume_modules(isp);
1779 }
1780
1781 #else
1782
1783 #define isp_pm_prepare NULL
1784 #define isp_pm_suspend NULL
1785 #define isp_pm_resume NULL
1786 #define isp_pm_complete NULL
1787
1788 #endif /* CONFIG_PM */
1789
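/*
 * isp_unregister_entities - Unregister all ISP entities
 * @isp: OMAP3 ISP device
 *
 * Unregister the sub-device entities of every ISP module, then the V4L2 and
 * media devices.
 */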
1790 static void isp_unregister_entities(struct isp_device *isp)
1791 {
1792 omap3isp_csi2_unregister_entities(&isp->isp_csi2a);
1793 omap3isp_ccp2_unregister_entities(&isp->isp_ccp2);
1794 omap3isp_ccdc_unregister_entities(&isp->isp_ccdc);
1795 omap3isp_preview_unregister_entities(&isp->isp_prev);
1796 omap3isp_resizer_unregister_entities(&isp->isp_res);
1797 omap3isp_stat_unregister_entities(&isp->isp_aewb);
1798 omap3isp_stat_unregister_entities(&isp->isp_af);
1799 omap3isp_stat_unregister_entities(&isp->isp_hist);
1800
1801 v4l2_device_unregister(&isp->v4l2_dev);
1802 media_device_unregister(&isp->media_dev);
1803 }
1804
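/*
 * isp_link_entity - Link an external entity to the matching ISP input
 * @isp: OMAP3 ISP device
 * @entity: External (sensor) entity
 * @interface: ISP interface the entity is connected to
 *
 * Create a media link from the first source pad of @entity to the sink pad of
 * the CCDC, CSI2 or CCP2 receiver selected by @interface.
 */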
1805 static int isp_link_entity(
1806 struct isp_device *isp, struct media_entity *entity,
1807 enum isp_interface_type interface)
1808 {
1809 struct media_entity *input;
1810 unsigned int flags;
1811 unsigned int pad;
1812 unsigned int i;
1813
1814 /* Connect the sensor to the correct interface module.
1815 * Parallel sensors are connected directly to the CCDC, while
1816 * serial sensors are connected to the CSI2a, CCP2b or CSI2c
1817 * receiver through CSIPHY1 or CSIPHY2.
1818 */
1819 switch (interface) {
1820 case ISP_INTERFACE_PARALLEL:
1821 input = &isp->isp_ccdc.subdev.entity;
1822 pad = CCDC_PAD_SINK;
1823 flags = 0;
1824 break;
1825
1826 case ISP_INTERFACE_CSI2A_PHY2:
1827 input = &isp->isp_csi2a.subdev.entity;
1828 pad = CSI2_PAD_SINK;
1829 flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
1830 break;
1831
1832 case ISP_INTERFACE_CCP2B_PHY1:
1833 case ISP_INTERFACE_CCP2B_PHY2:
1834 input = &isp->isp_ccp2.subdev.entity;
1835 pad = CCP2_PAD_SINK;
1836 flags = 0;
1837 break;
1838
1839 case ISP_INTERFACE_CSI2C_PHY1:
1840 input = &isp->isp_csi2c.subdev.entity;
1841 pad = CSI2_PAD_SINK;
1842 flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
1843 break;
1844
1845 default:
1846 dev_err(isp->dev, "%s: invalid interface type %u\n", __func__,
1847 interface);
1848 return -EINVAL;
1849 }
1850
1851 /*
1852 * Not all interfaces are available on all revisions of the
1853 * ISP. The sub-devices of those interfaces aren't initialised
1854 * in such a case. Check this by ensuring the num_pads is
1855 * non-zero.
1856 */
1857 if (!input->num_pads) {
1858 dev_err(isp->dev, "%s: invalid input %u\n", entity->name,
1859 interface);
1860 return -EINVAL;
1861 }
1862
1863 for (i = 0; i < entity->num_pads; i++) {
1864 if (entity->pads[i].flags & MEDIA_PAD_FL_SOURCE)
1865 break;
1866 }
1867 if (i == entity->num_pads) {
1868 dev_err(isp->dev, "%s: no source pad in external entity\n",
1869 __func__);
1870 return -EINVAL;
1871 }
1872
1873 return media_entity_create_link(entity, i, input, pad, flags);
1874 }
1875
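/*
 * isp_register_entities - Register the ISP devices and entities
 * @isp: OMAP3 ISP device
 *
 * Register the media device, the V4L2 device and the sub-device entities of
 * all ISP modules. Everything registered so far is unregistered on failure.
 */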
1876 static int isp_register_entities(struct isp_device *isp)
1877 {
1878 int ret;
1879
1880 isp->media_dev.dev = isp->dev;
1881 strlcpy(isp->media_dev.model, "TI OMAP3 ISP",
1882 sizeof(isp->media_dev.model));
1883 isp->media_dev.hw_revision = isp->revision;
1884 isp->media_dev.link_notify = isp_pipeline_link_notify;
1885 ret = media_device_register(&isp->media_dev);
1886 if (ret < 0) {
1887 dev_err(isp->dev, "%s: Media device registration failed (%d)\n",
1888 __func__, ret);
1889 return ret;
1890 }
1891
1892 isp->v4l2_dev.mdev = &isp->media_dev;
1893 ret = v4l2_device_register(isp->dev, &isp->v4l2_dev);
1894 if (ret < 0) {
1895 dev_err(isp->dev, "%s: V4L2 device registration failed (%d)\n",
1896 __func__, ret);
1897 goto done;
1898 }
1899
1900 /* Register internal entities */
1901 ret = omap3isp_ccp2_register_entities(&isp->isp_ccp2, &isp->v4l2_dev);
1902 if (ret < 0)
1903 goto done;
1904
1905 ret = omap3isp_csi2_register_entities(&isp->isp_csi2a, &isp->v4l2_dev);
1906 if (ret < 0)
1907 goto done;
1908
1909 ret = omap3isp_ccdc_register_entities(&isp->isp_ccdc, &isp->v4l2_dev);
1910 if (ret < 0)
1911 goto done;
1912
1913 ret = omap3isp_preview_register_entities(&isp->isp_prev,
1914 &isp->v4l2_dev);
1915 if (ret < 0)
1916 goto done;
1917
1918 ret = omap3isp_resizer_register_entities(&isp->isp_res, &isp->v4l2_dev);
1919 if (ret < 0)
1920 goto done;
1921
1922 ret = omap3isp_stat_register_entities(&isp->isp_aewb, &isp->v4l2_dev);
1923 if (ret < 0)
1924 goto done;
1925
1926 ret = omap3isp_stat_register_entities(&isp->isp_af, &isp->v4l2_dev);
1927 if (ret < 0)
1928 goto done;
1929
1930 ret = omap3isp_stat_register_entities(&isp->isp_hist, &isp->v4l2_dev);
1931 if (ret < 0)
1932 goto done;
1933
1934 done:
1935 if (ret < 0)
1936 isp_unregister_entities(isp);
1937
1938 return ret;
1939 }
1940
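/*
 * isp_cleanup_modules - Clean up all ISP sub-modules
 * @isp: OMAP3 ISP device
 */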
1941 static void isp_cleanup_modules(struct isp_device *isp)
1942 {
1943 omap3isp_h3a_aewb_cleanup(isp);
1944 omap3isp_h3a_af_cleanup(isp);
1945 omap3isp_hist_cleanup(isp);
1946 omap3isp_resizer_cleanup(isp);
1947 omap3isp_preview_cleanup(isp);
1948 omap3isp_ccdc_cleanup(isp);
1949 omap3isp_ccp2_cleanup(isp);
1950 omap3isp_csi2_cleanup(isp);
1951 }
1952
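/*
 * isp_initialize_modules - Initialize the ISP sub-modules and link them
 * @isp: OMAP3 ISP device
 *
 * Initialize the CSI PHYs, the CSI2/CCP2 receivers, the CCDC, the preview
 * engine, the resizer and the statistics engines, then create the internal
 * pipeline links between them. On failure the modules initialized so far are
 * cleaned up in reverse order.
 */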
1953 static int isp_initialize_modules(struct isp_device *isp)
1954 {
1955 int ret;
1956
1957 ret = omap3isp_csiphy_init(isp);
1958 if (ret < 0) {
1959 dev_err(isp->dev, "CSI PHY initialization failed\n");
1960 goto error_csiphy;
1961 }
1962
1963 ret = omap3isp_csi2_init(isp);
1964 if (ret < 0) {
1965 dev_err(isp->dev, "CSI2 initialization failed\n");
1966 goto error_csi2;
1967 }
1968
1969 ret = omap3isp_ccp2_init(isp);
1970 if (ret < 0) {
1971 dev_err(isp->dev, "CCP2 initialization failed\n");
1972 goto error_ccp2;
1973 }
1974
1975 ret = omap3isp_ccdc_init(isp);
1976 if (ret < 0) {
1977 dev_err(isp->dev, "CCDC initialization failed\n");
1978 goto error_ccdc;
1979 }
1980
1981 ret = omap3isp_preview_init(isp);
1982 if (ret < 0) {
1983 dev_err(isp->dev, "Preview initialization failed\n");
1984 goto error_preview;
1985 }
1986
1987 ret = omap3isp_resizer_init(isp);
1988 if (ret < 0) {
1989 dev_err(isp->dev, "Resizer initialization failed\n");
1990 goto error_resizer;
1991 }
1992
1993 ret = omap3isp_hist_init(isp);
1994 if (ret < 0) {
1995 dev_err(isp->dev, "Histogram initialization failed\n");
1996 goto error_hist;
1997 }
1998
1999 ret = omap3isp_h3a_aewb_init(isp);
2000 if (ret < 0) {
2001 dev_err(isp->dev, "H3A AEWB initialization failed\n");
2002 goto error_h3a_aewb;
2003 }
2004
2005 ret = omap3isp_h3a_af_init(isp);
2006 if (ret < 0) {
2007 dev_err(isp->dev, "H3A AF initialization failed\n");
2008 goto error_h3a_af;
2009 }
2010
2011 /* Connect the submodules. */
2012 ret = media_entity_create_link(
2013 &isp->isp_csi2a.subdev.entity, CSI2_PAD_SOURCE,
2014 &isp->isp_ccdc.subdev.entity, CCDC_PAD_SINK, 0);
2015 if (ret < 0)
2016 goto error_link;
2017
2018 ret = media_entity_create_link(
2019 &isp->isp_ccp2.subdev.entity, CCP2_PAD_SOURCE,
2020 &isp->isp_ccdc.subdev.entity, CCDC_PAD_SINK, 0);
2021 if (ret < 0)
2022 goto error_link;
2023
2024 ret = media_entity_create_link(
2025 &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
2026 &isp->isp_prev.subdev.entity, PREV_PAD_SINK, 0);
2027 if (ret < 0)
2028 goto error_link;
2029
2030 ret = media_entity_create_link(
2031 &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_OF,
2032 &isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0);
2033 if (ret < 0)
2034 goto error_link;
2035
2036 ret = media_entity_create_link(
2037 &isp->isp_prev.subdev.entity, PREV_PAD_SOURCE,
2038 &isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0);
2039 if (ret < 0)
2040 goto error_link;
2041
2042 ret = media_entity_create_link(
2043 &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
2044 &isp->isp_aewb.subdev.entity, 0,
2045 MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
2046 if (ret < 0)
2047 goto error_link;
2048
2049 ret = media_entity_create_link(
2050 &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
2051 &isp->isp_af.subdev.entity, 0,
2052 MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
2053 if (ret < 0)
2054 goto error_link;
2055
2056 ret = media_entity_create_link(
2057 &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
2058 &isp->isp_hist.subdev.entity, 0,
2059 MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
2060 if (ret < 0)
2061 goto error_link;
2062
2063 return 0;
2064
2065 error_link:
2066 omap3isp_h3a_af_cleanup(isp);
2067 error_h3a_af:
2068 omap3isp_h3a_aewb_cleanup(isp);
2069 error_h3a_aewb:
2070 omap3isp_hist_cleanup(isp);
2071 error_hist:
2072 omap3isp_resizer_cleanup(isp);
2073 error_resizer:
2074 omap3isp_preview_cleanup(isp);
2075 error_preview:
2076 omap3isp_ccdc_cleanup(isp);
2077 error_ccdc:
2078 omap3isp_ccp2_cleanup(isp);
2079 error_ccp2:
2080 omap3isp_csi2_cleanup(isp);
2081 error_csi2:
2082 error_csiphy:
2083 return ret;
2084 }
2085
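/*
 * isp_detach_iommu - Detach the ISP from the IOMMU
 * @isp: OMAP3 ISP device
 *
 * Release the ARM DMA IOMMU mapping and remove the device from its IOMMU
 * group.
 */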
2086 static void isp_detach_iommu(struct isp_device *isp)
2087 {
2088 arm_iommu_detach_device(isp->dev);
2089 arm_iommu_release_mapping(isp->mapping);
2090 isp->mapping = NULL;
2091 iommu_group_remove_device(isp->dev);
2092 }
2093
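/*
 * isp_attach_iommu - Attach the ISP to the IOMMU
 * @isp: OMAP3 ISP device
 *
 * Add the device to an IOMMU group and create an ARM DMA IOMMU mapping for it,
 * so that DMA addresses for the ISP are allocated through the IOMMU.
 */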
2094 static int isp_attach_iommu(struct isp_device *isp)
2095 {
2096 struct dma_iommu_mapping *mapping;
2097 struct iommu_group *group;
2098 int ret;
2099
2100 /* Create a device group and add the device to it. */
2101 group = iommu_group_alloc();
2102 if (IS_ERR(group)) {
2103 dev_err(isp->dev, "failed to allocate IOMMU group\n");
2104 return PTR_ERR(group);
2105 }
2106
2107 ret = iommu_group_add_device(group, isp->dev);
2108 iommu_group_put(group);
2109
2110 if (ret < 0) {
2111 dev_err(isp->dev, "failed to add device to IOMMU group\n");
2112 return ret;
2113 }
2114
2115 /*
2116 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
2117 * VAs. This will allocate a corresponding IOMMU domain.
2118 */
2119 mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G);
2120 if (IS_ERR(mapping)) {
2121 dev_err(isp->dev, "failed to create ARM IOMMU mapping\n");
2122 return PTR_ERR(mapping);
2123 }
2124
2125 isp->mapping = mapping;
2126
2127 /* Attach the ARM VA mapping to the device. */
2128 ret = arm_iommu_attach_device(isp->dev, mapping);
2129 if (ret < 0) {
2130 dev_err(isp->dev, "failed to attach device to VA mapping\n");
2131 goto error;
2132 }
2133
2134 return 0;
2135
2136 error:
2137 arm_iommu_release_mapping(isp->mapping);
2138 isp->mapping = NULL;
2139 return ret;
2140 }
2141
2142 /*
2143 * isp_remove - Remove ISP platform device
2144 * @pdev: Pointer to ISP platform device
2145 *
2146 * Always returns 0.
2147 */
2148 static int isp_remove(struct platform_device *pdev)
2149 {
2150 struct isp_device *isp = platform_get_drvdata(pdev);
2151
2152 v4l2_async_notifier_unregister(&isp->notifier);
2153 isp_unregister_entities(isp);
2154 isp_cleanup_modules(isp);
2155 isp_xclk_cleanup(isp);
2156
2157 __omap3isp_get(isp, false);
2158 isp_detach_iommu(isp);
2159 __omap3isp_put(isp, false);
2160
2161 return 0;
2162 }
2163
2164 enum isp_of_phy {
2165 ISP_OF_PHY_PARALLEL = 0,
2166 ISP_OF_PHY_CSIPHY1,
2167 ISP_OF_PHY_CSIPHY2,
2168 };
2169
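/*
 * isp_of_parse_node - Parse a single DT endpoint into an ISP bus configuration
 * @dev: ISP device
 * @node: DT endpoint node to parse
 * @isd: Async sub-device descriptor whose bus configuration is filled in
 *
 * The number of the port the endpoint belongs to selects the physical
 * interface (see enum isp_of_phy above). As a rough, hypothetical illustration
 * only (the property names come from the generic video-interfaces DT binding,
 * the values and labels are board specific), a sensor wired to CSIPHY2 could
 * be described as:
 *
 *	port@2 {
 *		reg = <2>;
 *		csi2a_ep: endpoint {
 *			remote-endpoint = <&sensor_out>;
 *			clock-lanes = <2>;
 *			data-lanes = <1 3>;
 *		};
 *	};
 *
 * Always returns 0; unknown port numbers only trigger a warning.
 */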
2170 static int isp_of_parse_node(struct device *dev, struct device_node *node,
2171 struct isp_async_subdev *isd)
2172 {
2173 struct isp_bus_cfg *buscfg = &isd->bus;
2174 struct v4l2_of_endpoint vep;
2175 unsigned int i;
2176
2177 v4l2_of_parse_endpoint(node, &vep);
2178
2179 dev_dbg(dev, "parsing endpoint %s, interface %u\n", node->full_name,
2180 vep.base.port);
2181
2182 switch (vep.base.port) {
2183 case ISP_OF_PHY_PARALLEL:
2184 buscfg->interface = ISP_INTERFACE_PARALLEL;
2185 buscfg->bus.parallel.data_lane_shift =
2186 vep.bus.parallel.data_shift;
2187 buscfg->bus.parallel.clk_pol =
2188 !!(vep.bus.parallel.flags
2189 & V4L2_MBUS_PCLK_SAMPLE_FALLING);
2190 buscfg->bus.parallel.hs_pol =
2191 !!(vep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
2192 buscfg->bus.parallel.vs_pol =
2193 !!(vep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
2194 buscfg->bus.parallel.fld_pol =
2195 !!(vep.bus.parallel.flags & V4L2_MBUS_FIELD_EVEN_LOW);
2196 buscfg->bus.parallel.data_pol =
2197 !!(vep.bus.parallel.flags & V4L2_MBUS_DATA_ACTIVE_LOW);
2198 break;
2199
2200 case ISP_OF_PHY_CSIPHY1:
2201 case ISP_OF_PHY_CSIPHY2:
2202 /* FIXME: always assume CSI-2 for now. */
2203 switch (vep.base.port) {
2204 case ISP_OF_PHY_CSIPHY1:
2205 buscfg->interface = ISP_INTERFACE_CSI2C_PHY1;
2206 break;
2207 case ISP_OF_PHY_CSIPHY2:
2208 buscfg->interface = ISP_INTERFACE_CSI2A_PHY2;
2209 break;
2210 }
2211 buscfg->bus.csi2.lanecfg.clk.pos = vep.bus.mipi_csi2.clock_lane;
2212 buscfg->bus.csi2.lanecfg.clk.pol =
2213 vep.bus.mipi_csi2.lane_polarities[0];
2214 dev_dbg(dev, "clock lane polarity %u, pos %u\n",
2215 buscfg->bus.csi2.lanecfg.clk.pol,
2216 buscfg->bus.csi2.lanecfg.clk.pos);
2217
2218 for (i = 0; i < ISP_CSIPHY2_NUM_DATA_LANES; i++) {
2219 buscfg->bus.csi2.lanecfg.data[i].pos =
2220 vep.bus.mipi_csi2.data_lanes[i];
2221 buscfg->bus.csi2.lanecfg.data[i].pol =
2222 vep.bus.mipi_csi2.lane_polarities[i + 1];
2223 dev_dbg(dev, "data lane %u polarity %u, pos %u\n", i,
2224 buscfg->bus.csi2.lanecfg.data[i].pol,
2225 buscfg->bus.csi2.lanecfg.data[i].pos);
2226 }
2227
2228 /*
2229 * FIXME: now we assume the CRC is always there.
2230 * Implement a way to obtain this information from the
2231 * sensor. Frame descriptors, perhaps?
2232 */
2233 buscfg->bus.csi2.crc = 1;
2234 break;
2235
2236 default:
2237 dev_warn(dev, "%s: invalid interface %u\n", node->full_name,
2238 vep.base.port);
2239 break;
2240 }
2241
2242 return 0;
2243 }
2244
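/*
 * isp_of_parse_nodes - Parse all DT endpoints connected to the ISP
 * @dev: ISP device
 * @notifier: V4L2 async notifier to populate
 *
 * Walk the endpoints of the ISP device node, allocate an async sub-device
 * descriptor for each of them and parse its bus configuration.
 *
 * Return the number of sub-devices found, or a negative error code on failure.
 */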
2245 static int isp_of_parse_nodes(struct device *dev,
2246 struct v4l2_async_notifier *notifier)
2247 {
2248 struct device_node *node = NULL;
2249
2250 notifier->subdevs = devm_kcalloc(
2251 dev, ISP_MAX_SUBDEVS, sizeof(*notifier->subdevs), GFP_KERNEL);
2252 if (!notifier->subdevs)
2253 return -ENOMEM;
2254
2255 while (notifier->num_subdevs < ISP_MAX_SUBDEVS &&
2256 (node = of_graph_get_next_endpoint(dev->of_node, node))) {
2257 struct isp_async_subdev *isd;
2258
2259 isd = devm_kzalloc(dev, sizeof(*isd), GFP_KERNEL);
2260 if (!isd) {
2261 of_node_put(node);
2262 return -ENOMEM;
2263 }
2264
2265 notifier->subdevs[notifier->num_subdevs] = &isd->asd;
2266
2267 if (isp_of_parse_node(dev, node, isd)) {
2268 of_node_put(node);
2269 return -EINVAL;
2270 }
2271
2272 isd->asd.match.of.node = of_graph_get_remote_port_parent(node);
2273 of_node_put(node);
2274 if (!isd->asd.match.of.node) {
2275 dev_warn(dev, "bad remote port parent\n");
2276 return -EINVAL;
2277 }
2278
2279 isd->asd.match_type = V4L2_ASYNC_MATCH_OF;
2280 notifier->num_subdevs++;
2281 }
2282
2283 return notifier->num_subdevs;
2284 }
2285
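/*
 * isp_subdev_notifier_bound - Link a newly bound sub-device to the ISP
 * @async: V4L2 async notifier
 * @subdev: Bound V4L2 sub-device
 * @asd: Matching async sub-device descriptor
 *
 * Link the external sub-device to the ISP input selected by its bus
 * configuration and store that configuration in the sub-device host data.
 */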
2286 static int isp_subdev_notifier_bound(struct v4l2_async_notifier *async,
2287 struct v4l2_subdev *subdev,
2288 struct v4l2_async_subdev *asd)
2289 {
2290 struct isp_device *isp = container_of(async, struct isp_device,
2291 notifier);
2292 struct isp_async_subdev *isd =
2293 container_of(asd, struct isp_async_subdev, asd);
2294 int ret;
2295
2296 ret = isp_link_entity(isp, &subdev->entity, isd->bus.interface);
2297 if (ret < 0)
2298 return ret;
2299
2300 isd->sd = subdev;
2301 isd->sd->host_priv = &isd->bus;
2302
2303 return ret;
2304 }
2305
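/*
 * isp_subdev_notifier_complete - Finalize async sub-device registration
 * @async: V4L2 async notifier
 *
 * Called once all sub-devices have been bound; create the device nodes for the
 * registered sub-devices.
 */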
2306 static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
2307 {
2308 struct isp_device *isp = container_of(async, struct isp_device,
2309 notifier);
2310
2311 return v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
2312 }
2313
2314 /*
2315 * isp_probe - Probe ISP platform device
2316 * @pdev: Pointer to ISP platform device
2317 *
2318 * Returns 0 if successful,
2319 * -ENOMEM if no memory available,
2320 * -ENODEV if no platform device resources found
2321 * or no space for remapping registers,
2322 * -EINVAL if the ISR couldn't be installed,
2323 * or the error value returned by clk_get().
2324 */
2325 static int isp_probe(struct platform_device *pdev)
2326 {
2327 struct isp_device *isp;
2328 struct resource *mem;
2329 int ret;
2330 int i, m;
2331
2332 isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
2333 if (!isp) {
2334 dev_err(&pdev->dev, "could not allocate memory\n");
2335 return -ENOMEM;
2336 }
2337
2338 ret = of_property_read_u32(pdev->dev.of_node, "ti,phy-type",
2339 &isp->phy_type);
2340 if (ret)
2341 return ret;
2342
2343 isp->syscon = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2344 "syscon");
2345 if (IS_ERR(isp->syscon))
2346 return PTR_ERR(isp->syscon);
2347
2348 ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1,
2349 &isp->syscon_offset);
2350 if (ret)
2351 return ret;
2352
2353 ret = isp_of_parse_nodes(&pdev->dev, &isp->notifier);
2354 if (ret < 0)
2355 return ret;
2356
2357 isp->autoidle = autoidle;
2358
2359 mutex_init(&isp->isp_mutex);
2360 spin_lock_init(&isp->stat_lock);
2361
2362 isp->dev = &pdev->dev;
2363 isp->ref_count = 0;
2364
2365 ret = dma_coerce_mask_and_coherent(isp->dev, DMA_BIT_MASK(32));
2366 if (ret)
2367 goto error;
2368
2369 platform_set_drvdata(pdev, isp);
2370
2371 /* Regulators */
2372 isp->isp_csiphy1.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy1");
2373 isp->isp_csiphy2.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy2");
2374
2375 /* Clocks
2376 *
2377 * The ISP clock tree is revision-dependent. We thus need to enable ICLK
2378 * manually to read the revision before calling __omap3isp_get().
2379 *
2380 * Start by mapping the ISP MMIO area, which is in two pieces.
2381 * The ISP IOMMU is in between. Map both now, and fill in the
2382 * ISP revision specific portions a little later in the
2383 * function.
2384 */
2385 for (i = 0; i < 2; i++) {
2386 unsigned int map_idx = i ? OMAP3_ISP_IOMEM_CSI2A_REGS1 : 0;
2387
2388 mem = platform_get_resource(pdev, IORESOURCE_MEM, i);
2389 isp->mmio_base[map_idx] =
2390 devm_ioremap_resource(isp->dev, mem);
2391 if (IS_ERR(isp->mmio_base[map_idx])) {
2392 ret = PTR_ERR(isp->mmio_base[map_idx]);
2393 goto error;
2394 }
2395 }
2396
2397 ret = isp_get_clocks(isp);
2398 if (ret < 0)
2399 goto error;
2400
2401 ret = clk_enable(isp->clock[ISP_CLK_CAM_ICK]);
2402 if (ret < 0)
2403 goto error;
2404
2405 isp->revision = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
2406 dev_info(isp->dev, "Revision %d.%d found\n",
2407 (isp->revision & 0xf0) >> 4, isp->revision & 0x0f);
2408
2409 clk_disable(isp->clock[ISP_CLK_CAM_ICK]);
2410
2411 if (__omap3isp_get(isp, false) == NULL) {
2412 ret = -ENODEV;
2413 goto error;
2414 }
2415
2416 ret = isp_reset(isp);
2417 if (ret < 0)
2418 goto error_isp;
2419
2420 ret = isp_xclk_init(isp);
2421 if (ret < 0)
2422 goto error_isp;
2423
2424 /* Memory resources */
2425 for (m = 0; m < ARRAY_SIZE(isp_res_maps); m++)
2426 if (isp->revision == isp_res_maps[m].isp_rev)
2427 break;
2428
2429 if (m == ARRAY_SIZE(isp_res_maps)) {
2430 dev_err(isp->dev, "No resource map found for ISP rev %d.%d\n",
2431 (isp->revision & 0xf0) >> 4, isp->revision & 0xf);
2432 ret = -ENODEV;
2433 goto error_isp;
2434 }
2435
2436 for (i = 1; i < OMAP3_ISP_IOMEM_CSI2A_REGS1; i++)
2437 isp->mmio_base[i] =
2438 isp->mmio_base[0] + isp_res_maps[m].offset[i];
2439
2440 for (i = OMAP3_ISP_IOMEM_CSIPHY2; i < OMAP3_ISP_IOMEM_LAST; i++)
2441 isp->mmio_base[i] =
2442 isp->mmio_base[OMAP3_ISP_IOMEM_CSI2A_REGS1]
2443 + isp_res_maps[m].offset[i];
2444
2445 isp->mmio_hist_base_phys =
2446 mem->start + isp_res_maps[m].offset[OMAP3_ISP_IOMEM_HIST];
2447
2448 /* IOMMU */
2449 ret = isp_attach_iommu(isp);
2450 if (ret < 0) {
2451 dev_err(&pdev->dev, "unable to attach to IOMMU\n");
2452 goto error_isp;
2453 }
2454
2455 /* Interrupt */
2456 isp->irq_num = platform_get_irq(pdev, 0);
2457 if (isp->irq_num <= 0) {
2458 dev_err(isp->dev, "No IRQ resource\n");
2459 ret = -ENODEV;
2460 goto error_iommu;
2461 }
2462
2463 if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED,
2464 "OMAP3 ISP", isp)) {
2465 dev_err(isp->dev, "Unable to request IRQ\n");
2466 ret = -EINVAL;
2467 goto error_iommu;
2468 }
2469
2470 /* Entities */
2471 ret = isp_initialize_modules(isp);
2472 if (ret < 0)
2473 goto error_iommu;
2474
2475 ret = isp_register_entities(isp);
2476 if (ret < 0)
2477 goto error_modules;
2478
2479 isp->notifier.bound = isp_subdev_notifier_bound;
2480 isp->notifier.complete = isp_subdev_notifier_complete;
2481
2482 ret = v4l2_async_notifier_register(&isp->v4l2_dev, &isp->notifier);
2483 if (ret)
2484 goto error_register_entities;
2485
2486 isp_core_init(isp, 1);
2487 omap3isp_put(isp);
2488
2489 return 0;
2490
2491 error_register_entities:
2492 isp_unregister_entities(isp);
2493 error_modules:
2494 isp_cleanup_modules(isp);
2495 error_iommu:
2496 isp_detach_iommu(isp);
2497 error_isp:
2498 isp_xclk_cleanup(isp);
2499 __omap3isp_put(isp, false);
2500 error:
2501 mutex_destroy(&isp->isp_mutex);
2502
2503 return ret;
2504 }
2505
2506 static const struct dev_pm_ops omap3isp_pm_ops = {
2507 .prepare = isp_pm_prepare,
2508 .suspend = isp_pm_suspend,
2509 .resume = isp_pm_resume,
2510 .complete = isp_pm_complete,
2511 };
2512
2513 static struct platform_device_id omap3isp_id_table[] = {
2514 { "omap3isp", 0 },
2515 { },
2516 };
2517 MODULE_DEVICE_TABLE(platform, omap3isp_id_table);
2518
2519 static const struct of_device_id omap3isp_of_table[] = {
2520 { .compatible = "ti,omap3-isp" },
2521 { },
2522 };
2523
2524 static struct platform_driver omap3isp_driver = {
2525 .probe = isp_probe,
2526 .remove = isp_remove,
2527 .id_table = omap3isp_id_table,
2528 .driver = {
2529 .name = "omap3isp",
2530 .pm = &omap3isp_pm_ops,
2531 .of_match_table = omap3isp_of_table,
2532 },
2533 };
2534
2535 module_platform_driver(omap3isp_driver);
2536
2537 MODULE_AUTHOR("Nokia Corporation");
2538 MODULE_DESCRIPTION("TI OMAP3 ISP driver");
2539 MODULE_LICENSE("GPL");
2540 MODULE_VERSION(ISP_VIDEO_DRIVER_VERSION);
2541