/*
 * isphist.c
 *
 * TI OMAP3 ISP - Histogram module
 *
 * Copyright (C) 2010 Nokia Corporation
 * Copyright (C) 2009 Texas Instruments, Inc.
 *
 * Contacts: David Cohen <dacohen@gmail.com>
 *	     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/omap-dmaengine.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "isp.h"
#include "ispreg.h"
#include "isphist.h"

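/*
 * When non-zero, try to read the histogram memory through DMA; fall back
 * to PIO if no DMA channel can be obtained.
 */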
#define HIST_CONFIG_DMA	1

/*
 * hist_reset_mem - Clear the histogram memory before starting the stats engine.
 */
static void hist_reset_mem(struct ispstat *hist)
{
	struct isp_device *isp = hist->isp;
	struct omap3isp_hist_config *conf = hist->priv;
	unsigned int i;

	isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);

	/*
	 * Setting this bit clears the histogram internal buffer at the same
	 * time it is read. The bit must be cleared afterwards.
	 */
	isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);

	/*
	 * Clear 4 words per iteration as an optimization: it avoids 3/4 of
	 * the loop jumps. OMAP3ISP_HIST_MEM_SIZE is known to be divisible by 4.
	 */
	for (i = OMAP3ISP_HIST_MEM_SIZE / 4; i > 0; i--) {
		isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
	}
	isp_reg_clr(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);

	hist->wait_acc_frames = conf->num_acc_frames;
}

/*
 * hist_setup_regs - Helper function to update Histogram registers.
 */
static void hist_setup_regs(struct ispstat *hist, void *priv)
{
	struct isp_device *isp = hist->isp;
	struct omap3isp_hist_config *conf = priv;
	int c;
	u32 cnt;
	u32 wb_gain;
	u32 reg_hor[OMAP3ISP_HIST_MAX_REGIONS];
	u32 reg_ver[OMAP3ISP_HIST_MAX_REGIONS];

	if (!hist->update || hist->state == ISPSTAT_DISABLED ||
	    hist->state == ISPSTAT_DISABLING)
		return;

	cnt = conf->cfa << ISPHIST_CNT_CFA_SHIFT;

	wb_gain = conf->wg[0] << ISPHIST_WB_GAIN_WG00_SHIFT;
	wb_gain |= conf->wg[1] << ISPHIST_WB_GAIN_WG01_SHIFT;
	wb_gain |= conf->wg[2] << ISPHIST_WB_GAIN_WG02_SHIFT;
	if (conf->cfa == OMAP3ISP_HIST_CFA_BAYER)
		wb_gain |= conf->wg[3] << ISPHIST_WB_GAIN_WG03_SHIFT;

	/* Region sizes and positions */
	for (c = 0; c < OMAP3ISP_HIST_MAX_REGIONS; c++) {
		if (c < conf->num_regions) {
			reg_hor[c] = (conf->region[c].h_start <<
				     ISPHIST_REG_START_SHIFT)
				   | (conf->region[c].h_end <<
				     ISPHIST_REG_END_SHIFT);
			reg_ver[c] = (conf->region[c].v_start <<
				     ISPHIST_REG_START_SHIFT)
				   | (conf->region[c].v_end <<
				     ISPHIST_REG_END_SHIFT);
		} else {
			reg_hor[c] = 0;
			reg_ver[c] = 0;
		}
	}

	cnt |= conf->hist_bins << ISPHIST_CNT_BINS_SHIFT;
	switch (conf->hist_bins) {
	case OMAP3ISP_HIST_BINS_256:
		cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 8) <<
			ISPHIST_CNT_SHIFT_SHIFT;
		break;
	case OMAP3ISP_HIST_BINS_128:
		cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 7) <<
			ISPHIST_CNT_SHIFT_SHIFT;
		break;
	case OMAP3ISP_HIST_BINS_64:
		cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 6) <<
			ISPHIST_CNT_SHIFT_SHIFT;
		break;
	default: /* OMAP3ISP_HIST_BINS_32 */
		cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 5) <<
			ISPHIST_CNT_SHIFT_SHIFT;
		break;
	}

	hist_reset_mem(hist);

	isp_reg_writel(isp, cnt, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT);
	isp_reg_writel(isp, wb_gain, OMAP3_ISP_IOMEM_HIST, ISPHIST_WB_GAIN);
	isp_reg_writel(isp, reg_hor[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_HORZ);
	isp_reg_writel(isp, reg_ver[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_VERT);
	isp_reg_writel(isp, reg_hor[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_HORZ);
	isp_reg_writel(isp, reg_ver[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_VERT);
	isp_reg_writel(isp, reg_hor[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_HORZ);
	isp_reg_writel(isp, reg_ver[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_VERT);
	isp_reg_writel(isp, reg_hor[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_HORZ);
	isp_reg_writel(isp, reg_ver[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_VERT);

	hist->update = 0;
	hist->config_counter += hist->inc_config;
	hist->inc_config = 0;
	hist->buf_size = conf->buf_size;
}

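/*
 * hist_enable - Enable/disable the histogram module.
 */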
static void hist_enable(struct ispstat *hist, int enable)
{
	if (enable) {
		isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR,
			    ISPHIST_PCR_ENABLE);
		omap3isp_subclk_enable(hist->isp, OMAP3_ISP_SUBCLK_HIST);
	} else {
		isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR,
			    ISPHIST_PCR_ENABLE);
		omap3isp_subclk_disable(hist->isp, OMAP3_ISP_SUBCLK_HIST);
	}
}

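/*
 * hist_busy - Check if the histogram module is busy.
 */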
static int hist_busy(struct ispstat *hist)
{
	return isp_reg_readl(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR)
						& ISPHIST_PCR_BUSY;
}

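/*
 * hist_dma_cb - DMA engine callback for the histogram readout transfer.
 */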
static void hist_dma_cb(void *data)
{
	struct ispstat *hist = data;

	/* FIXME: The DMA engine API can't report transfer errors :-/ */

	isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
		    ISPHIST_CNT_CLEAR);

	omap3isp_stat_dma_isr(hist);
	if (hist->state != ISPSTAT_DISABLED)
		omap3isp_hist_dma_done(hist->isp);
}

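/*
 * hist_buf_dma - Read the histogram memory into the active buffer using DMA.
 */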
static int hist_buf_dma(struct ispstat *hist)
{
	dma_addr_t dma_addr = hist->active_buf->dma_addr;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	dma_cookie_t cookie;
	int ret;

	if (unlikely(!dma_addr)) {
		dev_dbg(hist->isp->dev, "hist: invalid DMA buffer address\n");
		goto error;
	}

	isp_reg_writel(hist->isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
	isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
		    ISPHIST_CNT_CLEAR);
	omap3isp_flush(hist->isp);

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = hist->isp->mmio_hist_base_phys + ISPHIST_DATA;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = hist->buf_size / 4;

	ret = dmaengine_slave_config(hist->dma_ch, &cfg);
	if (ret < 0) {
		dev_dbg(hist->isp->dev,
			"hist: DMA slave configuration failed\n");
		goto error;
	}

	tx = dmaengine_prep_slave_single(hist->dma_ch, dma_addr,
					 hist->buf_size, DMA_DEV_TO_MEM,
					 DMA_CTRL_ACK);
	if (tx == NULL) {
		dev_dbg(hist->isp->dev,
			"hist: DMA slave preparation failed\n");
		goto error;
	}

	tx->callback = hist_dma_cb;
	tx->callback_param = hist;
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_dbg(hist->isp->dev, "hist: DMA submission failed\n");
		goto error;
	}

	dma_async_issue_pending(hist->dma_ch);

	return STAT_BUF_WAITING_DMA;

error:
	hist_reset_mem(hist);
	return STAT_NO_BUF;
}

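/*
 * hist_buf_pio - Read the histogram memory into the active buffer using PIO.
 */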
static int hist_buf_pio(struct ispstat *hist)
{
	struct isp_device *isp = hist->isp;
	u32 *buf = hist->active_buf->virt_addr;
	unsigned int i;

	if (!buf) {
		dev_dbg(isp->dev, "hist: invalid PIO buffer address\n");
		hist_reset_mem(hist);
		return STAT_NO_BUF;
	}

	isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);

	/*
	 * Setting this bit clears the histogram internal buffer at the same
	 * time it is read. The bit must be cleared just after all data is
	 * acquired.
	 */
	isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);

	/*
	 * Read four 4-byte words per iteration as an optimization: it avoids
	 * 3/4 of the loop jumps. buf_size is known to be divisible by 16.
	 */
	for (i = hist->buf_size / 16; i > 0; i--) {
		*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
	}
	isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
		    ISPHIST_CNT_CLEAR);

	return STAT_BUF_DONE;
}

/*
 * hist_buf_process - Callback from ISP driver for HIST interrupt.
 */
static int hist_buf_process(struct ispstat *hist)
{
	struct omap3isp_hist_config *user_cfg = hist->priv;
	int ret;

	if (atomic_read(&hist->buf_err) || hist->state != ISPSTAT_ENABLED) {
		hist_reset_mem(hist);
		return STAT_NO_BUF;
	}

	if (--(hist->wait_acc_frames))
		return STAT_NO_BUF;

	if (hist->dma_ch)
		ret = hist_buf_dma(hist);
	else
		ret = hist_buf_pio(hist);

	hist->wait_acc_frames = user_cfg->num_acc_frames;

	return ret;
}

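/*
 * hist_get_buf_size - Compute the buffer size required by a configuration.
 */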
static u32 hist_get_buf_size(struct omap3isp_hist_config *conf)
{
	return OMAP3ISP_HIST_MEM_SIZE_BINS(conf->hist_bins) * conf->num_regions;
}

/*
 * hist_validate_params - Helper function to check user-supplied parameters.
 * @new_conf: Pointer to user configuration structure.
 *
 * Returns 0 if the configuration is valid.
 */
static int hist_validate_params(struct ispstat *hist, void *new_conf)
{
	struct omap3isp_hist_config *user_cfg = new_conf;
	int c;
	u32 buf_size;

	if (user_cfg->cfa > OMAP3ISP_HIST_CFA_FOVEONX3)
		return -EINVAL;

	/* Region sizes and positions */

	if ((user_cfg->num_regions < OMAP3ISP_HIST_MIN_REGIONS) ||
	    (user_cfg->num_regions > OMAP3ISP_HIST_MAX_REGIONS))
		return -EINVAL;

	/* Regions */
	for (c = 0; c < user_cfg->num_regions; c++) {
		if (user_cfg->region[c].h_start & ~ISPHIST_REG_START_END_MASK)
			return -EINVAL;
		if (user_cfg->region[c].h_end & ~ISPHIST_REG_START_END_MASK)
			return -EINVAL;
		if (user_cfg->region[c].v_start & ~ISPHIST_REG_START_END_MASK)
			return -EINVAL;
		if (user_cfg->region[c].v_end & ~ISPHIST_REG_START_END_MASK)
			return -EINVAL;
		if (user_cfg->region[c].h_start > user_cfg->region[c].h_end)
			return -EINVAL;
		if (user_cfg->region[c].v_start > user_cfg->region[c].v_end)
			return -EINVAL;
	}

	switch (user_cfg->num_regions) {
	case 1:
		if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_256)
			return -EINVAL;
		break;
	case 2:
		if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_128)
			return -EINVAL;
		break;
	default: /* 3 or 4 */
		if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_64)
			return -EINVAL;
		break;
	}

	buf_size = hist_get_buf_size(user_cfg);
	if (buf_size > user_cfg->buf_size)
		/* User's buf_size request wasn't enough */
		user_cfg->buf_size = buf_size;
	else if (user_cfg->buf_size > OMAP3ISP_HIST_MAX_BUF_SIZE)
		user_cfg->buf_size = OMAP3ISP_HIST_MAX_BUF_SIZE;

	return 0;
}

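/*
 * hist_comp_params - Compare new user params with the current configuration.
 *
 * Returns 1 if a register update is needed, 0 otherwise.
 */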
static int hist_comp_params(struct ispstat *hist,
			    struct omap3isp_hist_config *user_cfg)
{
	struct omap3isp_hist_config *cur_cfg = hist->priv;
	int c;

	if (cur_cfg->cfa != user_cfg->cfa)
		return 1;

	if (cur_cfg->num_acc_frames != user_cfg->num_acc_frames)
		return 1;

	if (cur_cfg->hist_bins != user_cfg->hist_bins)
		return 1;

	for (c = 0; c < OMAP3ISP_HIST_MAX_WG; c++) {
		if (c == 3 && user_cfg->cfa == OMAP3ISP_HIST_CFA_FOVEONX3)
			break;
		else if (cur_cfg->wg[c] != user_cfg->wg[c])
			return 1;
	}

	if (cur_cfg->num_regions != user_cfg->num_regions)
		return 1;

	/* Regions */
	for (c = 0; c < user_cfg->num_regions; c++) {
		if (cur_cfg->region[c].h_start != user_cfg->region[c].h_start)
			return 1;
		if (cur_cfg->region[c].h_end != user_cfg->region[c].h_end)
			return 1;
		if (cur_cfg->region[c].v_start != user_cfg->region[c].v_start)
			return 1;
		if (cur_cfg->region[c].v_end != user_cfg->region[c].v_end)
			return 1;
	}

	return 0;
}

/*
 * hist_set_params - Helper function to check and store user-supplied params.
 * @new_conf: Pointer to user configuration structure.
 */
static void hist_set_params(struct ispstat *hist, void *new_conf)
{
	struct omap3isp_hist_config *user_cfg = new_conf;
	struct omap3isp_hist_config *cur_cfg = hist->priv;

	if (!hist->configured || hist_comp_params(hist, user_cfg)) {
		memcpy(cur_cfg, user_cfg, sizeof(*user_cfg));
		if (user_cfg->num_acc_frames == 0)
			user_cfg->num_acc_frames = 1;
		hist->inc_config++;
		hist->update = 1;
		/*
		 * The user might have asked for a bigger buffer than necessary
		 * for this configuration. In order to return the right amount
		 * of data during buffer requests, calculate the size here
		 * instead of sticking with user_cfg->buf_size.
		 */
		cur_cfg->buf_size = hist_get_buf_size(cur_cfg);
	}
}

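/*
 * hist_ioctl - Handle the histogram module private ioctls.
 */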
static long hist_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
	struct ispstat *stat = v4l2_get_subdevdata(sd);

	switch (cmd) {
	case VIDIOC_OMAP3ISP_HIST_CFG:
		return omap3isp_stat_config(stat, arg);
	case VIDIOC_OMAP3ISP_STAT_REQ:
		return omap3isp_stat_request_statistics(stat, arg);
	case VIDIOC_OMAP3ISP_STAT_EN: {
		int *en = arg;
		return omap3isp_stat_enable(stat, !!*en);
	}
	}

	return -ENOIOCTLCMD;
}

static const struct ispstat_ops hist_ops = {
	.validate_params	= hist_validate_params,
	.set_params		= hist_set_params,
	.setup_regs		= hist_setup_regs,
	.enable			= hist_enable,
	.busy			= hist_busy,
	.buf_process		= hist_buf_process,
};

static const struct v4l2_subdev_core_ops hist_subdev_core_ops = {
	.ioctl = hist_ioctl,
	.subscribe_event = omap3isp_stat_subscribe_event,
	.unsubscribe_event = omap3isp_stat_unsubscribe_event,
};

static const struct v4l2_subdev_video_ops hist_subdev_video_ops = {
	.s_stream = omap3isp_stat_s_stream,
};

static const struct v4l2_subdev_ops hist_subdev_ops = {
	.core = &hist_subdev_core_ops,
	.video = &hist_subdev_video_ops,
};

/*
 * omap3isp_hist_init - Module Initialization.
 */
int omap3isp_hist_init(struct isp_device *isp)
{
	struct ispstat *hist = &isp->isp_hist;
	struct omap3isp_hist_config *hist_cfg;
	int ret = -1;

	hist_cfg = devm_kzalloc(isp->dev, sizeof(*hist_cfg), GFP_KERNEL);
	if (hist_cfg == NULL)
		return -ENOMEM;

	hist->isp = isp;

	if (HIST_CONFIG_DMA) {
		struct platform_device *pdev = to_platform_device(isp->dev);
		struct resource *res;
		unsigned int sig = 0;
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
						   "hist");
		if (res)
			sig = res->start;

		hist->dma_ch = dma_request_slave_channel_compat(mask,
				omap_dma_filter_fn, &sig, isp->dev, "hist");
		if (!hist->dma_ch)
			dev_warn(isp->dev,
				 "hist: DMA channel request failed, using PIO\n");
		else
			dev_dbg(isp->dev, "hist: using DMA channel %s\n",
				dma_chan_name(hist->dma_ch));
	}

	hist->ops = &hist_ops;
	hist->priv = hist_cfg;
	hist->event_type = V4L2_EVENT_OMAP3ISP_HIST;

	ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);
	if (ret) {
		if (hist->dma_ch)
			dma_release_channel(hist->dma_ch);
	}

	return ret;
}

/*
 * omap3isp_hist_cleanup - Module cleanup.
 */
void omap3isp_hist_cleanup(struct isp_device *isp)
{
	struct ispstat *hist = &isp->isp_hist;

	if (hist->dma_ch)
		dma_release_channel(hist->dma_ch);

	omap3isp_stat_cleanup(hist);
}