• Home
  • Raw
  • Download

Lines Matching refs:stat

35 #define IS_COHERENT_BUF(stat)	((stat)->dma_ch >= 0)  argument
69 #define IS_H3A_AF(stat) ((stat) == &(stat)->isp->isp_af) argument
70 #define IS_H3A_AEWB(stat) ((stat) == &(stat)->isp->isp_aewb) argument
71 #define IS_H3A(stat) (IS_H3A_AF(stat) || IS_H3A_AEWB(stat)) argument
73 static void __isp_stat_buf_sync_magic(struct ispstat *stat, in __isp_stat_buf_sync_magic() argument
80 struct device *dev = stat->isp->dev; in __isp_stat_buf_sync_magic()
97 static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat, in isp_stat_buf_sync_magic_for_device() argument
102 if (IS_COHERENT_BUF(stat)) in isp_stat_buf_sync_magic_for_device()
105 __isp_stat_buf_sync_magic(stat, buf, buf_size, dir, in isp_stat_buf_sync_magic_for_device()
109 static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat, in isp_stat_buf_sync_magic_for_cpu() argument
114 if (IS_COHERENT_BUF(stat)) in isp_stat_buf_sync_magic_for_cpu()
117 __isp_stat_buf_sync_magic(stat, buf, buf_size, dir, in isp_stat_buf_sync_magic_for_cpu()
121 static int isp_stat_buf_check_magic(struct ispstat *stat, in isp_stat_buf_check_magic() argument
124 const u32 buf_size = IS_H3A_AF(stat) ? in isp_stat_buf_check_magic()
130 isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE); in isp_stat_buf_check_magic()
138 dev_dbg(stat->isp->dev, "%s: beginning magic check does not " in isp_stat_buf_check_magic()
139 "match.\n", stat->subdev.name); in isp_stat_buf_check_magic()
147 dev_dbg(stat->isp->dev, "%s: ending magic check does " in isp_stat_buf_check_magic()
148 "not match.\n", stat->subdev.name); in isp_stat_buf_check_magic()
153 isp_stat_buf_sync_magic_for_device(stat, buf, buf_size, in isp_stat_buf_check_magic()
159 static void isp_stat_buf_insert_magic(struct ispstat *stat, in isp_stat_buf_insert_magic() argument
162 const u32 buf_size = IS_H3A_AF(stat) ? in isp_stat_buf_insert_magic()
163 stat->buf_size + AF_EXTRA_DATA : stat->buf_size; in isp_stat_buf_insert_magic()
165 isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE); in isp_stat_buf_insert_magic()
176 isp_stat_buf_sync_magic_for_device(stat, buf, buf_size, in isp_stat_buf_insert_magic()
180 static void isp_stat_buf_sync_for_device(struct ispstat *stat, in isp_stat_buf_sync_for_device() argument
183 if (IS_COHERENT_BUF(stat)) in isp_stat_buf_sync_for_device()
186 dma_sync_sg_for_device(stat->isp->dev, buf->iovm->sgt->sgl, in isp_stat_buf_sync_for_device()
190 static void isp_stat_buf_sync_for_cpu(struct ispstat *stat, in isp_stat_buf_sync_for_cpu() argument
193 if (IS_COHERENT_BUF(stat)) in isp_stat_buf_sync_for_cpu()
196 dma_sync_sg_for_cpu(stat->isp->dev, buf->iovm->sgt->sgl, in isp_stat_buf_sync_for_cpu()
200 static void isp_stat_buf_clear(struct ispstat *stat) in isp_stat_buf_clear() argument
205 stat->buf[i].empty = 1; in isp_stat_buf_clear()
209 __isp_stat_buf_find(struct ispstat *stat, int look_empty) in __isp_stat_buf_find() argument
215 struct ispstat_buffer *curr = &stat->buf[i]; in __isp_stat_buf_find()
221 if (curr == stat->locked_buf || curr == stat->active_buf) in __isp_stat_buf_find()
244 isp_stat_buf_find_oldest(struct ispstat *stat) in isp_stat_buf_find_oldest() argument
246 return __isp_stat_buf_find(stat, 0); in isp_stat_buf_find_oldest()
250 isp_stat_buf_find_oldest_or_empty(struct ispstat *stat) in isp_stat_buf_find_oldest_or_empty() argument
252 return __isp_stat_buf_find(stat, 1); in isp_stat_buf_find_oldest_or_empty()
255 static int isp_stat_buf_queue(struct ispstat *stat) in isp_stat_buf_queue() argument
257 if (!stat->active_buf) in isp_stat_buf_queue()
260 ktime_get_ts(&stat->active_buf->ts); in isp_stat_buf_queue()
262 stat->active_buf->buf_size = stat->buf_size; in isp_stat_buf_queue()
263 if (isp_stat_buf_check_magic(stat, stat->active_buf)) { in isp_stat_buf_queue()
264 dev_dbg(stat->isp->dev, "%s: data wasn't properly written.\n", in isp_stat_buf_queue()
265 stat->subdev.name); in isp_stat_buf_queue()
268 stat->active_buf->config_counter = stat->config_counter; in isp_stat_buf_queue()
269 stat->active_buf->frame_number = stat->frame_number; in isp_stat_buf_queue()
270 stat->active_buf->empty = 0; in isp_stat_buf_queue()
271 stat->active_buf = NULL; in isp_stat_buf_queue()
277 static void isp_stat_buf_next(struct ispstat *stat) in isp_stat_buf_next() argument
279 if (unlikely(stat->active_buf)) in isp_stat_buf_next()
281 dev_dbg(stat->isp->dev, "%s: new buffer requested without " in isp_stat_buf_next()
283 stat->subdev.name); in isp_stat_buf_next()
285 stat->active_buf = isp_stat_buf_find_oldest_or_empty(stat); in isp_stat_buf_next()
288 static void isp_stat_buf_release(struct ispstat *stat) in isp_stat_buf_release() argument
292 isp_stat_buf_sync_for_device(stat, stat->locked_buf); in isp_stat_buf_release()
293 spin_lock_irqsave(&stat->isp->stat_lock, flags); in isp_stat_buf_release()
294 stat->locked_buf = NULL; in isp_stat_buf_release()
295 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_buf_release()
299 static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat, in isp_stat_buf_get() argument
306 spin_lock_irqsave(&stat->isp->stat_lock, flags); in isp_stat_buf_get()
309 buf = isp_stat_buf_find_oldest(stat); in isp_stat_buf_get()
311 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_buf_get()
312 dev_dbg(stat->isp->dev, "%s: cannot find a buffer.\n", in isp_stat_buf_get()
313 stat->subdev.name); in isp_stat_buf_get()
316 if (isp_stat_buf_check_magic(stat, buf)) { in isp_stat_buf_get()
317 dev_dbg(stat->isp->dev, "%s: current buffer has " in isp_stat_buf_get()
318 "corrupted data.\n", stat->subdev.name); in isp_stat_buf_get()
327 stat->locked_buf = buf; in isp_stat_buf_get()
329 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_buf_get()
332 dev_warn(stat->isp->dev, "%s: userspace's buffer size is " in isp_stat_buf_get()
333 "not enough.\n", stat->subdev.name); in isp_stat_buf_get()
334 isp_stat_buf_release(stat); in isp_stat_buf_get()
338 isp_stat_buf_sync_for_cpu(stat, buf); in isp_stat_buf_get()
345 dev_info(stat->isp->dev, in isp_stat_buf_get()
347 stat->subdev.name, rval); in isp_stat_buf_get()
349 isp_stat_buf_release(stat); in isp_stat_buf_get()
355 static void isp_stat_bufs_free(struct ispstat *stat) in isp_stat_bufs_free() argument
357 struct isp_device *isp = stat->isp; in isp_stat_bufs_free()
361 struct ispstat_buffer *buf = &stat->buf[i]; in isp_stat_bufs_free()
363 if (!IS_COHERENT_BUF(stat)) { in isp_stat_bufs_free()
375 dma_free_coherent(stat->isp->dev, stat->buf_alloc_size, in isp_stat_bufs_free()
385 dev_dbg(stat->isp->dev, "%s: all buffers were freed.\n", in isp_stat_bufs_free()
386 stat->subdev.name); in isp_stat_bufs_free()
388 stat->buf_alloc_size = 0; in isp_stat_bufs_free()
389 stat->active_buf = NULL; in isp_stat_bufs_free()
392 static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size) in isp_stat_bufs_alloc_iommu() argument
394 struct isp_device *isp = stat->isp; in isp_stat_bufs_alloc_iommu()
397 stat->buf_alloc_size = size; in isp_stat_bufs_alloc_iommu()
400 struct ispstat_buffer *buf = &stat->buf[i]; in isp_stat_bufs_alloc_iommu()
407 dev_err(stat->isp->dev, in isp_stat_bufs_alloc_iommu()
409 "buffer %d\n", stat->subdev.name, i); in isp_stat_bufs_alloc_iommu()
410 isp_stat_bufs_free(stat); in isp_stat_bufs_alloc_iommu()
418 isp_stat_bufs_free(stat); in isp_stat_bufs_alloc_iommu()
423 buf->virt_addr = omap_da_to_va(stat->isp->dev, in isp_stat_bufs_alloc_iommu()
426 dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated." in isp_stat_bufs_alloc_iommu()
428 stat->subdev.name, i, buf->iommu_addr, in isp_stat_bufs_alloc_iommu()
435 static int isp_stat_bufs_alloc_dma(struct ispstat *stat, unsigned int size) in isp_stat_bufs_alloc_dma() argument
439 stat->buf_alloc_size = size; in isp_stat_bufs_alloc_dma()
442 struct ispstat_buffer *buf = &stat->buf[i]; in isp_stat_bufs_alloc_dma()
445 buf->virt_addr = dma_alloc_coherent(stat->isp->dev, size, in isp_stat_bufs_alloc_dma()
449 dev_info(stat->isp->dev, in isp_stat_bufs_alloc_dma()
451 "DMA buffer %d\n", stat->subdev.name, i); in isp_stat_bufs_alloc_dma()
452 isp_stat_bufs_free(stat); in isp_stat_bufs_alloc_dma()
457 dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated." in isp_stat_bufs_alloc_dma()
459 stat->subdev.name, i, (unsigned long)buf->dma_addr, in isp_stat_bufs_alloc_dma()
466 static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size) in isp_stat_bufs_alloc() argument
470 spin_lock_irqsave(&stat->isp->stat_lock, flags); in isp_stat_bufs_alloc()
472 BUG_ON(stat->locked_buf != NULL); in isp_stat_bufs_alloc()
475 if (stat->buf_alloc_size >= size) { in isp_stat_bufs_alloc()
476 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_bufs_alloc()
480 if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) { in isp_stat_bufs_alloc()
481 dev_info(stat->isp->dev, in isp_stat_bufs_alloc()
483 stat->subdev.name); in isp_stat_bufs_alloc()
484 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_bufs_alloc()
488 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_bufs_alloc()
490 isp_stat_bufs_free(stat); in isp_stat_bufs_alloc()
492 if (IS_COHERENT_BUF(stat)) in isp_stat_bufs_alloc()
493 return isp_stat_bufs_alloc_dma(stat, size); in isp_stat_bufs_alloc()
495 return isp_stat_bufs_alloc_iommu(stat, size); in isp_stat_bufs_alloc()
498 static void isp_stat_queue_event(struct ispstat *stat, int err) in isp_stat_queue_event() argument
500 struct video_device *vdev = stat->subdev.devnode; in isp_stat_queue_event()
506 status->frame_number = stat->frame_number; in isp_stat_queue_event()
507 status->config_counter = stat->config_counter; in isp_stat_queue_event()
511 event.type = stat->event_type; in isp_stat_queue_event()
522 int omap3isp_stat_request_statistics(struct ispstat *stat, in omap3isp_stat_request_statistics() argument
527 if (stat->state != ISPSTAT_ENABLED) { in omap3isp_stat_request_statistics()
528 dev_dbg(stat->isp->dev, "%s: engine not enabled.\n", in omap3isp_stat_request_statistics()
529 stat->subdev.name); in omap3isp_stat_request_statistics()
533 mutex_lock(&stat->ioctl_lock); in omap3isp_stat_request_statistics()
534 buf = isp_stat_buf_get(stat, data); in omap3isp_stat_request_statistics()
536 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_request_statistics()
547 isp_stat_buf_release(stat); in omap3isp_stat_request_statistics()
548 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_request_statistics()
561 int omap3isp_stat_config(struct ispstat *stat, void *new_conf) in omap3isp_stat_config() argument
569 dev_dbg(stat->isp->dev, "%s: configuration is NULL\n", in omap3isp_stat_config()
570 stat->subdev.name); in omap3isp_stat_config()
574 mutex_lock(&stat->ioctl_lock); in omap3isp_stat_config()
576 dev_dbg(stat->isp->dev, "%s: configuring module with buffer " in omap3isp_stat_config()
577 "size=0x%08lx\n", stat->subdev.name, (unsigned long)buf_size); in omap3isp_stat_config()
579 ret = stat->ops->validate_params(stat, new_conf); in omap3isp_stat_config()
581 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_config()
582 dev_dbg(stat->isp->dev, "%s: configuration values are " in omap3isp_stat_config()
583 "invalid.\n", stat->subdev.name); in omap3isp_stat_config()
588 dev_dbg(stat->isp->dev, "%s: driver has corrected buffer size " in omap3isp_stat_config()
589 "request to 0x%08lx\n", stat->subdev.name, in omap3isp_stat_config()
603 if (IS_H3A(stat)) { in omap3isp_stat_config()
605 if (IS_H3A_AF(stat)) in omap3isp_stat_config()
611 if (stat->recover_priv) { in omap3isp_stat_config()
613 stat->recover_priv; in omap3isp_stat_config()
622 ret = isp_stat_bufs_alloc(stat, buf_size); in omap3isp_stat_config()
624 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_config()
628 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in omap3isp_stat_config()
629 stat->ops->set_params(stat, new_conf); in omap3isp_stat_config()
630 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in omap3isp_stat_config()
636 user_cfg->config_counter = stat->config_counter + stat->inc_config; in omap3isp_stat_config()
639 stat->configured = 1; in omap3isp_stat_config()
640 dev_dbg(stat->isp->dev, "%s: module has been successfully " in omap3isp_stat_config()
641 "configured.\n", stat->subdev.name); in omap3isp_stat_config()
643 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_config()
654 static int isp_stat_buf_process(struct ispstat *stat, int buf_state) in isp_stat_buf_process() argument
658 if (!atomic_add_unless(&stat->buf_err, -1, 0) && in isp_stat_buf_process()
659 buf_state == STAT_BUF_DONE && stat->state == ISPSTAT_ENABLED) { in isp_stat_buf_process()
660 ret = isp_stat_buf_queue(stat); in isp_stat_buf_process()
661 isp_stat_buf_next(stat); in isp_stat_buf_process()
667 int omap3isp_stat_pcr_busy(struct ispstat *stat) in omap3isp_stat_pcr_busy() argument
669 return stat->ops->busy(stat); in omap3isp_stat_pcr_busy()
672 int omap3isp_stat_busy(struct ispstat *stat) in omap3isp_stat_busy() argument
674 return omap3isp_stat_pcr_busy(stat) | stat->buf_processing | in omap3isp_stat_busy()
675 (stat->state != ISPSTAT_DISABLED); in omap3isp_stat_busy()
685 static void isp_stat_pcr_enable(struct ispstat *stat, u8 pcr_enable) in isp_stat_pcr_enable() argument
687 if ((stat->state != ISPSTAT_ENABLING && in isp_stat_pcr_enable()
688 stat->state != ISPSTAT_ENABLED) && pcr_enable) in isp_stat_pcr_enable()
692 stat->ops->enable(stat, pcr_enable); in isp_stat_pcr_enable()
693 if (stat->state == ISPSTAT_DISABLING && !pcr_enable) in isp_stat_pcr_enable()
694 stat->state = ISPSTAT_DISABLED; in isp_stat_pcr_enable()
695 else if (stat->state == ISPSTAT_ENABLING && pcr_enable) in isp_stat_pcr_enable()
696 stat->state = ISPSTAT_ENABLED; in isp_stat_pcr_enable()
699 void omap3isp_stat_suspend(struct ispstat *stat) in omap3isp_stat_suspend() argument
703 spin_lock_irqsave(&stat->isp->stat_lock, flags); in omap3isp_stat_suspend()
705 if (stat->state != ISPSTAT_DISABLED) in omap3isp_stat_suspend()
706 stat->ops->enable(stat, 0); in omap3isp_stat_suspend()
707 if (stat->state == ISPSTAT_ENABLED) in omap3isp_stat_suspend()
708 stat->state = ISPSTAT_SUSPENDED; in omap3isp_stat_suspend()
710 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in omap3isp_stat_suspend()
713 void omap3isp_stat_resume(struct ispstat *stat) in omap3isp_stat_resume() argument
716 if (stat->state == ISPSTAT_SUSPENDED) in omap3isp_stat_resume()
717 stat->state = ISPSTAT_ENABLING; in omap3isp_stat_resume()
720 static void isp_stat_try_enable(struct ispstat *stat) in isp_stat_try_enable() argument
724 if (stat->priv == NULL) in isp_stat_try_enable()
728 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in isp_stat_try_enable()
729 if (stat->state == ISPSTAT_ENABLING && !stat->buf_processing && in isp_stat_try_enable()
730 stat->buf_alloc_size) { in isp_stat_try_enable()
735 stat->update = 1; in isp_stat_try_enable()
736 isp_stat_buf_next(stat); in isp_stat_try_enable()
737 stat->ops->setup_regs(stat, stat->priv); in isp_stat_try_enable()
738 isp_stat_buf_insert_magic(stat, stat->active_buf); in isp_stat_try_enable()
746 if (!IS_H3A(stat)) in isp_stat_try_enable()
747 atomic_set(&stat->buf_err, 0); in isp_stat_try_enable()
749 isp_stat_pcr_enable(stat, 1); in isp_stat_try_enable()
750 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in isp_stat_try_enable()
751 dev_dbg(stat->isp->dev, "%s: module is enabled.\n", in isp_stat_try_enable()
752 stat->subdev.name); in isp_stat_try_enable()
754 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in isp_stat_try_enable()
758 void omap3isp_stat_isr_frame_sync(struct ispstat *stat) in omap3isp_stat_isr_frame_sync() argument
760 isp_stat_try_enable(stat); in omap3isp_stat_isr_frame_sync()
763 void omap3isp_stat_sbl_overflow(struct ispstat *stat) in omap3isp_stat_sbl_overflow() argument
767 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in omap3isp_stat_sbl_overflow()
772 atomic_set(&stat->buf_err, 2); in omap3isp_stat_sbl_overflow()
780 if (stat->recover_priv) in omap3isp_stat_sbl_overflow()
781 stat->sbl_ovl_recover = 1; in omap3isp_stat_sbl_overflow()
782 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in omap3isp_stat_sbl_overflow()
792 int omap3isp_stat_enable(struct ispstat *stat, u8 enable) in omap3isp_stat_enable() argument
796 dev_dbg(stat->isp->dev, "%s: user wants to %s module.\n", in omap3isp_stat_enable()
797 stat->subdev.name, enable ? "enable" : "disable"); in omap3isp_stat_enable()
800 mutex_lock(&stat->ioctl_lock); in omap3isp_stat_enable()
802 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in omap3isp_stat_enable()
804 if (!stat->configured && enable) { in omap3isp_stat_enable()
805 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in omap3isp_stat_enable()
806 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_enable()
807 dev_dbg(stat->isp->dev, "%s: cannot enable module as it's " in omap3isp_stat_enable()
809 stat->subdev.name); in omap3isp_stat_enable()
814 if (stat->state == ISPSTAT_DISABLING) in omap3isp_stat_enable()
816 stat->state = ISPSTAT_ENABLED; in omap3isp_stat_enable()
817 else if (stat->state == ISPSTAT_DISABLED) in omap3isp_stat_enable()
819 stat->state = ISPSTAT_ENABLING; in omap3isp_stat_enable()
821 if (stat->state == ISPSTAT_ENABLING) { in omap3isp_stat_enable()
823 stat->state = ISPSTAT_DISABLED; in omap3isp_stat_enable()
824 } else if (stat->state == ISPSTAT_ENABLED) { in omap3isp_stat_enable()
826 stat->state = ISPSTAT_DISABLING; in omap3isp_stat_enable()
827 isp_stat_buf_clear(stat); in omap3isp_stat_enable()
831 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in omap3isp_stat_enable()
832 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_enable()
839 struct ispstat *stat = v4l2_get_subdevdata(subdev); in omap3isp_stat_s_stream() local
846 isp_stat_try_enable(stat); in omap3isp_stat_s_stream()
850 omap3isp_stat_enable(stat, 0); in omap3isp_stat_s_stream()
851 spin_lock_irqsave(&stat->isp->stat_lock, flags); in omap3isp_stat_s_stream()
852 stat->ops->enable(stat, 0); in omap3isp_stat_s_stream()
853 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in omap3isp_stat_s_stream()
866 if (!omap3isp_stat_pcr_busy(stat)) in omap3isp_stat_s_stream()
867 omap3isp_stat_isr(stat); in omap3isp_stat_s_stream()
869 dev_dbg(stat->isp->dev, "%s: module is being disabled\n", in omap3isp_stat_s_stream()
870 stat->subdev.name); in omap3isp_stat_s_stream()
879 static void __stat_isr(struct ispstat *stat, int from_dma) in __stat_isr() argument
891 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in __stat_isr()
892 if (stat->state == ISPSTAT_DISABLED) { in __stat_isr()
893 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
896 buf_processing = stat->buf_processing; in __stat_isr()
897 stat->buf_processing = 1; in __stat_isr()
898 stat->ops->enable(stat, 0); in __stat_isr()
901 if (stat->state == ISPSTAT_ENABLED) { in __stat_isr()
902 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
903 dev_err(stat->isp->dev, in __stat_isr()
905 "processing a buffer.\n", stat->subdev.name); in __stat_isr()
916 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
920 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
923 if (!omap3isp_stat_pcr_busy(stat)) { in __stat_isr()
924 if (!from_dma && stat->ops->buf_process) in __stat_isr()
926 ret = stat->ops->buf_process(stat); in __stat_isr()
931 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in __stat_isr()
938 if (stat->state == ISPSTAT_DISABLING) { in __stat_isr()
939 stat->state = ISPSTAT_DISABLED; in __stat_isr()
940 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
941 stat->buf_processing = 0; in __stat_isr()
944 pipe = to_isp_pipeline(&stat->subdev.entity); in __stat_isr()
945 stat->frame_number = atomic_read(&pipe->frame_number); in __stat_isr()
952 ret = isp_stat_buf_process(stat, ret); in __stat_isr()
954 if (likely(!stat->sbl_ovl_recover)) { in __stat_isr()
955 stat->ops->setup_regs(stat, stat->priv); in __stat_isr()
962 stat->update = 1; in __stat_isr()
963 stat->ops->setup_regs(stat, stat->recover_priv); in __stat_isr()
964 stat->sbl_ovl_recover = 0; in __stat_isr()
970 stat->update = 1; in __stat_isr()
973 isp_stat_buf_insert_magic(stat, stat->active_buf); in __stat_isr()
986 isp_stat_pcr_enable(stat, 1); in __stat_isr()
987 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
999 if (stat->ops->buf_process) in __stat_isr()
1006 atomic_set(&stat->buf_err, 1); in __stat_isr()
1009 dev_dbg(stat->isp->dev, "%s: cannot process buffer, " in __stat_isr()
1010 "device is busy.\n", stat->subdev.name); in __stat_isr()
1014 stat->buf_processing = 0; in __stat_isr()
1015 isp_stat_queue_event(stat, ret != STAT_BUF_DONE); in __stat_isr()
1018 void omap3isp_stat_isr(struct ispstat *stat) in omap3isp_stat_isr() argument
1020 __stat_isr(stat, 0); in omap3isp_stat_isr()
1023 void omap3isp_stat_dma_isr(struct ispstat *stat) in omap3isp_stat_dma_isr() argument
1025 __stat_isr(stat, 1); in omap3isp_stat_dma_isr()
1032 struct ispstat *stat = v4l2_get_subdevdata(subdev); in omap3isp_stat_subscribe_event() local
1034 if (sub->type != stat->event_type) in omap3isp_stat_subscribe_event()
1047 void omap3isp_stat_unregister_entities(struct ispstat *stat) in omap3isp_stat_unregister_entities() argument
1049 v4l2_device_unregister_subdev(&stat->subdev); in omap3isp_stat_unregister_entities()
1052 int omap3isp_stat_register_entities(struct ispstat *stat, in omap3isp_stat_register_entities() argument
1055 return v4l2_device_register_subdev(vdev, &stat->subdev); in omap3isp_stat_register_entities()
1058 static int isp_stat_init_entities(struct ispstat *stat, const char *name, in isp_stat_init_entities() argument
1061 struct v4l2_subdev *subdev = &stat->subdev; in isp_stat_init_entities()
1068 v4l2_set_subdevdata(subdev, stat); in isp_stat_init_entities()
1070 stat->pad.flags = MEDIA_PAD_FL_SINK; in isp_stat_init_entities()
1073 return media_entity_init(me, 1, &stat->pad, 0); in isp_stat_init_entities()
1076 int omap3isp_stat_init(struct ispstat *stat, const char *name, in omap3isp_stat_init() argument
1081 stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf), GFP_KERNEL); in omap3isp_stat_init()
1082 if (!stat->buf) in omap3isp_stat_init()
1085 isp_stat_buf_clear(stat); in omap3isp_stat_init()
1086 mutex_init(&stat->ioctl_lock); in omap3isp_stat_init()
1087 atomic_set(&stat->buf_err, 0); in omap3isp_stat_init()
1089 ret = isp_stat_init_entities(stat, name, sd_ops); in omap3isp_stat_init()
1091 mutex_destroy(&stat->ioctl_lock); in omap3isp_stat_init()
1092 kfree(stat->buf); in omap3isp_stat_init()
1098 void omap3isp_stat_cleanup(struct ispstat *stat) in omap3isp_stat_cleanup() argument
1100 media_entity_cleanup(&stat->subdev.entity); in omap3isp_stat_cleanup()
1101 mutex_destroy(&stat->ioctl_lock); in omap3isp_stat_cleanup()
1102 isp_stat_bufs_free(stat); in omap3isp_stat_cleanup()
1103 kfree(stat->buf); in omap3isp_stat_cleanup()