Lines matching references to "stat" (OMAP3 ISP statistics core, ispstat.c)
22 #define ISP_STAT_USES_DMAENGINE(stat) ((stat)->dma_ch != NULL) argument
56 #define IS_H3A_AF(stat) ((stat) == &(stat)->isp->isp_af) argument
57 #define IS_H3A_AEWB(stat) ((stat) == &(stat)->isp->isp_aewb) argument
58 #define IS_H3A(stat) (IS_H3A_AF(stat) || IS_H3A_AEWB(stat)) argument
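
The three macros above identify which hardware module owns a given ispstat purely by address: each ispstat is embedded in the isp_device, so comparing the pointer against &isp->isp_af or &isp->isp_aewb distinguishes the H3A AF/AEWB engines from the histogram module without any type field. A minimal userspace sketch of the pattern (the members mirror the driver's isp_af/isp_aewb/isp_hist; everything else is illustrative):

    #include <stdio.h>

    struct isp_device;

    struct ispstat {
        struct isp_device *isp;       /* back-pointer to the container */
    };

    struct isp_device {               /* owns one ispstat per module */
        struct ispstat isp_af, isp_aewb, isp_hist;
    };

    /* identity tests by address, as IS_H3A_AF()/IS_H3A_AEWB() do */
    #define IS_H3A_AF(stat)   ((stat) == &(stat)->isp->isp_af)
    #define IS_H3A_AEWB(stat) ((stat) == &(stat)->isp->isp_aewb)
    #define IS_H3A(stat)      (IS_H3A_AF(stat) || IS_H3A_AEWB(stat))

    int main(void)
    {
        struct isp_device isp;

        isp.isp_af.isp = isp.isp_aewb.isp = isp.isp_hist.isp = &isp;
        printf("af is H3A:   %d\n", IS_H3A(&isp.isp_af));    /* 1 */
        printf("hist is H3A: %d\n", IS_H3A(&isp.isp_hist));  /* 0 */
        return 0;
    }
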
60 static void __isp_stat_buf_sync_magic(struct ispstat *stat, in __isp_stat_buf_sync_magic() argument
68 dma_sync(stat->isp->dev, buf->dma_addr, 0, MAGIC_SIZE, dir); in __isp_stat_buf_sync_magic()
69 dma_sync(stat->isp->dev, buf->dma_addr + (buf_size & PAGE_MASK), in __isp_stat_buf_sync_magic()
73 static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat, in isp_stat_buf_sync_magic_for_device() argument
78 if (ISP_STAT_USES_DMAENGINE(stat)) in isp_stat_buf_sync_magic_for_device()
81 __isp_stat_buf_sync_magic(stat, buf, buf_size, dir, in isp_stat_buf_sync_magic_for_device()
85 static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat, in isp_stat_buf_sync_magic_for_cpu() argument
90 if (ISP_STAT_USES_DMAENGINE(stat)) in isp_stat_buf_sync_magic_for_cpu()
93 __isp_stat_buf_sync_magic(stat, buf, buf_size, dir, in isp_stat_buf_sync_magic_for_cpu()
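
The two wrappers above sync only the MAGIC_SIZE-byte regions at the head and tail of a statistics buffer rather than the full payload, and they bail out early when the module uses the DMA engine, whose buffers are coherent and need no sync. The low-level routine arrives as the dma_sync function-pointer argument so one body serves both directions. A minimal userspace sketch of that shape (the driver computes the tail offset with page arithmetic, simplified away here):

    #include <stdio.h>
    #include <stddef.h>

    #define MAGIC_SIZE 16             /* assumed size of each magic region */

    /* stand-in for dma_sync_single_for_cpu()/_for_device() */
    typedef void (*sync_fn)(size_t off, size_t len);

    static void sync_for_cpu(size_t off, size_t len)
    { printf("sync-for-cpu    off=%zu len=%zu\n", off, len); }

    static void sync_for_device(size_t off, size_t len)
    { printf("sync-for-device off=%zu len=%zu\n", off, len); }

    /* sync only the head and tail magic regions, like
     * __isp_stat_buf_sync_magic(); the direction-specific routine is
     * passed in so one body serves both wrappers */
    static void sync_magic(size_t buf_size, sync_fn dma_sync)
    {
        dma_sync(0, MAGIC_SIZE);          /* head magic */
        dma_sync(buf_size, MAGIC_SIZE);   /* tail magic */
    }

    int main(void)
    {
        sync_magic(4096, sync_for_cpu);     /* before the CPU reads them  */
        sync_magic(4096, sync_for_device);  /* hand ownership back after  */
        return 0;
    }
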
97 static int isp_stat_buf_check_magic(struct ispstat *stat, in isp_stat_buf_check_magic() argument
100 const u32 buf_size = IS_H3A_AF(stat) ? in isp_stat_buf_check_magic()
106 isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE); in isp_stat_buf_check_magic()
114 dev_dbg(stat->isp->dev, in isp_stat_buf_check_magic()
116 stat->subdev.name); in isp_stat_buf_check_magic()
124 dev_dbg(stat->isp->dev, in isp_stat_buf_check_magic()
126 stat->subdev.name); in isp_stat_buf_check_magic()
131 isp_stat_buf_sync_magic_for_device(stat, buf, buf_size, in isp_stat_buf_check_magic()
137 static void isp_stat_buf_insert_magic(struct ispstat *stat, in isp_stat_buf_insert_magic() argument
140 const u32 buf_size = IS_H3A_AF(stat) ? in isp_stat_buf_insert_magic()
141 stat->buf_size + AF_EXTRA_DATA : stat->buf_size; in isp_stat_buf_insert_magic()
143 isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE); in isp_stat_buf_insert_magic()
154 isp_stat_buf_sync_magic_for_device(stat, buf, buf_size, in isp_stat_buf_insert_magic()
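
isp_stat_buf_insert_magic() and isp_stat_buf_check_magic() together frame each buffer with MAGIC_SIZE bytes of a known pattern: the pattern is stamped at offset 0 and at the expected end of the data before the hardware writes, so a later check can tell whether the DMA write actually happened (head magic overwritten) and did not overrun (tail magic still intact). For the AF engine the framed size is stat->buf_size + AF_EXTRA_DATA. A self-contained model of the stamp-and-verify idea (the MAGIC_SIZE/MAGIC_NUM values are assumptions, since the listing does not show the defines):

    #include <stdio.h>
    #include <string.h>

    #define MAGIC_SIZE 16
    #define MAGIC_NUM  0x55

    /* stamp MAGIC_NUM at offset 0 and at offset data_size, as
     * isp_stat_buf_insert_magic() does; the hardware later writes
     * data_size bytes from offset 0, overwriting the head magic */
    static void insert_magic(unsigned char *buf, size_t data_size)
    {
        memset(buf, MAGIC_NUM, MAGIC_SIZE);
        memset(buf + data_size, MAGIC_NUM, MAGIC_SIZE);
    }

    /* a write completed iff the head magic was overwritten and the
     * tail magic survived, as isp_stat_buf_check_magic() verifies */
    static int check_magic(const unsigned char *buf, size_t data_size)
    {
        size_t i;
        int head_overwritten = 0;

        for (i = 0; i < MAGIC_SIZE; i++)
            if (buf[i] != MAGIC_NUM)
                head_overwritten = 1; /* any differing byte is enough */
        if (!head_overwritten)
            return -1;                /* DMA never reached the buffer */

        for (i = 0; i < MAGIC_SIZE; i++)
            if (buf[data_size + i] != MAGIC_NUM)
                return -1;            /* write ran past the expected size */
        return 0;
    }

    int main(void)
    {
        unsigned char buf[256 + MAGIC_SIZE];
        size_t data_size = 256;

        insert_magic(buf, data_size);
        printf("untouched: %s\n", check_magic(buf, data_size) ? "bad" : "ok");

        memset(buf, 0xab, data_size); /* simulate a complete DMA write */
        printf("after DMA: %s\n", check_magic(buf, data_size) ? "bad" : "ok");
        return 0;
    }
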
158 static void isp_stat_buf_sync_for_device(struct ispstat *stat, in isp_stat_buf_sync_for_device() argument
161 if (ISP_STAT_USES_DMAENGINE(stat)) in isp_stat_buf_sync_for_device()
164 dma_sync_sg_for_device(stat->isp->dev, buf->sgt.sgl, in isp_stat_buf_sync_for_device()
168 static void isp_stat_buf_sync_for_cpu(struct ispstat *stat, in isp_stat_buf_sync_for_cpu() argument
171 if (ISP_STAT_USES_DMAENGINE(stat)) in isp_stat_buf_sync_for_cpu()
174 dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl, in isp_stat_buf_sync_for_cpu()
178 static void isp_stat_buf_clear(struct ispstat *stat) in isp_stat_buf_clear() argument
183 stat->buf[i].empty = 1; in isp_stat_buf_clear()
187 __isp_stat_buf_find(struct ispstat *stat, int look_empty) in __isp_stat_buf_find() argument
193 struct ispstat_buffer *curr = &stat->buf[i]; in __isp_stat_buf_find()
199 if (curr == stat->locked_buf || curr == stat->active_buf) in __isp_stat_buf_find()
222 isp_stat_buf_find_oldest(struct ispstat *stat) in isp_stat_buf_find_oldest() argument
224 return __isp_stat_buf_find(stat, 0); in isp_stat_buf_find_oldest()
228 isp_stat_buf_find_oldest_or_empty(struct ispstat *stat) in isp_stat_buf_find_oldest_or_empty() argument
230 return __isp_stat_buf_find(stat, 1); in isp_stat_buf_find_oldest_or_empty()
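
__isp_stat_buf_find() scans the pool once, skipping the buffer locked for userspace and the one the hardware is filling; with look_empty set it returns the first empty buffer, otherwise the oldest filled one, where "oldest" is decided by a signed difference of frame numbers so the comparison survives counter wraparound. A userspace model of the same selection:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define STAT_MAX_BUFS 5

    struct buffer {
        uint32_t frame_number;
        int empty;
    };

    /* pick an empty buffer if allowed, else the oldest one; the signed
     * subtraction keeps "older" correct across frame-counter wraparound,
     * mirroring __isp_stat_buf_find() */
    static struct buffer *buf_find(struct buffer *bufs, size_t n,
                                   const struct buffer *locked,
                                   const struct buffer *active,
                                   int look_empty)
    {
        struct buffer *found = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
            struct buffer *curr = &bufs[i];

            if (curr == locked || curr == active)
                continue;             /* in use by userspace or hardware */
            if (curr->empty) {
                if (look_empty)
                    return curr;      /* empty beats anything else */
                continue;
            }
            if (!found ||
                (int32_t)(curr->frame_number - found->frame_number) < 0)
                found = curr;         /* strictly older frame */
        }
        return found;
    }

    int main(void)
    {
        struct buffer bufs[STAT_MAX_BUFS] = {
            { 10, 0 }, { 7, 0 }, { 9, 0 }, { 0, 1 }, { 8, 0 },
        };
        struct buffer *oldest = buf_find(bufs, STAT_MAX_BUFS,
                                         &bufs[1] /* locked */, NULL, 0);
        printf("oldest unlocked frame: %u\n", oldest->frame_number); /* 8 */
        return 0;
    }
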
233 static int isp_stat_buf_queue(struct ispstat *stat) in isp_stat_buf_queue() argument
235 if (!stat->active_buf) in isp_stat_buf_queue()
238 ktime_get_ts64(&stat->active_buf->ts); in isp_stat_buf_queue()
240 stat->active_buf->buf_size = stat->buf_size; in isp_stat_buf_queue()
241 if (isp_stat_buf_check_magic(stat, stat->active_buf)) { in isp_stat_buf_queue()
242 dev_dbg(stat->isp->dev, "%s: data wasn't properly written.\n", in isp_stat_buf_queue()
243 stat->subdev.name); in isp_stat_buf_queue()
246 stat->active_buf->config_counter = stat->config_counter; in isp_stat_buf_queue()
247 stat->active_buf->frame_number = stat->frame_number; in isp_stat_buf_queue()
248 stat->active_buf->empty = 0; in isp_stat_buf_queue()
249 stat->active_buf = NULL; in isp_stat_buf_queue()
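
isp_stat_buf_queue() is the hand-off from the hardware to the done pool: the active buffer is timestamped, tagged with the current frame and configuration counters, marked non-empty, and stat->active_buf is cleared so the next frame starts fresh (the magic check that can veto the hand-off is elided below). A sketch of that bookkeeping:

    #include <stdio.h>
    #include <time.h>

    struct stat_buffer {
        struct timespec ts;           /* when the frame's data became ready */
        unsigned int frame_number;
        unsigned int config_counter;
        int empty;
    };

    /* hand the hardware's current buffer over to the "done" pool --
     * the shape of isp_stat_buf_queue(); CLOCK_MONOTONIC stands in
     * for ktime_get_ts64() */
    static int buf_queue(struct stat_buffer **active,
                         unsigned int frame, unsigned int config)
    {
        struct stat_buffer *buf = *active;

        if (!buf)
            return -1;                /* STAT_NO_BUF in the driver */
        clock_gettime(CLOCK_MONOTONIC, &buf->ts);
        buf->frame_number = frame;
        buf->config_counter = config;
        buf->empty = 0;
        *active = NULL;               /* next frame picks a fresh buffer */
        return 0;                     /* STAT_BUF_DONE */
    }

    int main(void)
    {
        struct stat_buffer b = { .empty = 1 };
        struct stat_buffer *active = &b;

        buf_queue(&active, 42, 3);
        printf("frame=%u empty=%d active=%p\n",
               b.frame_number, b.empty, (void *)active);
        return 0;
    }
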
255 static void isp_stat_buf_next(struct ispstat *stat) in isp_stat_buf_next() argument
257 if (unlikely(stat->active_buf)) in isp_stat_buf_next()
259 dev_dbg(stat->isp->dev, in isp_stat_buf_next()
261 stat->subdev.name); in isp_stat_buf_next()
263 stat->active_buf = isp_stat_buf_find_oldest_or_empty(stat); in isp_stat_buf_next()
266 static void isp_stat_buf_release(struct ispstat *stat) in isp_stat_buf_release() argument
270 isp_stat_buf_sync_for_device(stat, stat->locked_buf); in isp_stat_buf_release()
271 spin_lock_irqsave(&stat->isp->stat_lock, flags); in isp_stat_buf_release()
272 stat->locked_buf = NULL; in isp_stat_buf_release()
273 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_buf_release()
277 static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat, in isp_stat_buf_get() argument
284 spin_lock_irqsave(&stat->isp->stat_lock, flags); in isp_stat_buf_get()
287 buf = isp_stat_buf_find_oldest(stat); in isp_stat_buf_get()
289 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_buf_get()
290 dev_dbg(stat->isp->dev, "%s: cannot find a buffer.\n", in isp_stat_buf_get()
291 stat->subdev.name); in isp_stat_buf_get()
294 if (isp_stat_buf_check_magic(stat, buf)) { in isp_stat_buf_get()
295 dev_dbg(stat->isp->dev, in isp_stat_buf_get()
297 stat->subdev.name); in isp_stat_buf_get()
306 stat->locked_buf = buf; in isp_stat_buf_get()
308 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_buf_get()
311 dev_warn(stat->isp->dev, in isp_stat_buf_get()
313 stat->subdev.name); in isp_stat_buf_get()
314 isp_stat_buf_release(stat); in isp_stat_buf_get()
318 isp_stat_buf_sync_for_cpu(stat, buf); in isp_stat_buf_get()
325 dev_info(stat->isp->dev, in isp_stat_buf_get()
327 stat->subdev.name, rval); in isp_stat_buf_get()
329 isp_stat_buf_release(stat); in isp_stat_buf_get()
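
isp_stat_buf_get() shows the usual pin-then-copy discipline: the oldest valid buffer is selected and recorded as stat->locked_buf under the IRQ-safe spinlock, but the expensive copy_to_user() runs only after the lock is dropped, with the locked_buf pointer keeping the ISR side away from the buffer in the meantime. A userspace model of that discipline (a mutex stands in for the spinlock; names are illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <pthread.h>

    struct pool {
        pthread_mutex_t lock;
        char data[64];
        const char *locked_buf;   /* pinned for the copy, like stat->locked_buf */
    };

    /* pin a buffer under the lock, then copy it out with the lock
     * dropped -- the pattern isp_stat_buf_get() uses so a slow
     * copy_to_user() never runs inside the IRQ-safe spinlock */
    static int buf_get(struct pool *p, char *dst, size_t len)
    {
        pthread_mutex_lock(&p->lock);
        p->locked_buf = p->data;      /* ISR-side code must now skip it */
        pthread_mutex_unlock(&p->lock);

        memcpy(dst, p->locked_buf, len);  /* stands in for copy_to_user() */
        return 0;   /* stays pinned until isp_stat_buf_release() */
    }

    int main(void)
    {
        struct pool p = { PTHREAD_MUTEX_INITIALIZER, "statistics payload", NULL };
        char out[64];

        buf_get(&p, out, sizeof(out));
        printf("copied: %s\n", out);
        return 0;
    }
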
335 static void isp_stat_bufs_free(struct ispstat *stat) in isp_stat_bufs_free() argument
337 struct device *dev = ISP_STAT_USES_DMAENGINE(stat) in isp_stat_bufs_free()
338 ? NULL : stat->isp->dev; in isp_stat_bufs_free()
342 struct ispstat_buffer *buf = &stat->buf[i]; in isp_stat_bufs_free()
349 dma_free_coherent(dev, stat->buf_alloc_size, buf->virt_addr, in isp_stat_bufs_free()
357 dev_dbg(stat->isp->dev, "%s: all buffers were freed.\n", in isp_stat_bufs_free()
358 stat->subdev.name); in isp_stat_bufs_free()
360 stat->buf_alloc_size = 0; in isp_stat_bufs_free()
361 stat->active_buf = NULL; in isp_stat_bufs_free()
403 static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size) in isp_stat_bufs_alloc() argument
405 struct device *dev = ISP_STAT_USES_DMAENGINE(stat) in isp_stat_bufs_alloc()
406 ? NULL : stat->isp->dev; in isp_stat_bufs_alloc()
410 spin_lock_irqsave(&stat->isp->stat_lock, flags); in isp_stat_bufs_alloc()
412 BUG_ON(stat->locked_buf != NULL); in isp_stat_bufs_alloc()
415 if (stat->buf_alloc_size >= size) { in isp_stat_bufs_alloc()
416 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_bufs_alloc()
420 if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) { in isp_stat_bufs_alloc()
421 dev_info(stat->isp->dev, in isp_stat_bufs_alloc()
423 stat->subdev.name); in isp_stat_bufs_alloc()
424 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_bufs_alloc()
428 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in isp_stat_bufs_alloc()
430 isp_stat_bufs_free(stat); in isp_stat_bufs_alloc()
432 stat->buf_alloc_size = size; in isp_stat_bufs_alloc()
435 struct ispstat_buffer *buf = &stat->buf[i]; in isp_stat_bufs_alloc()
440 dev_err(stat->isp->dev, in isp_stat_bufs_alloc()
442 stat->subdev.name, i); in isp_stat_bufs_alloc()
443 isp_stat_bufs_free(stat); in isp_stat_bufs_alloc()
449 dev_dbg(stat->isp->dev, in isp_stat_bufs_alloc()
451 stat->subdev.name, i, &buf->dma_addr, buf->virt_addr); in isp_stat_bufs_alloc()
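
isp_stat_bufs_alloc() is all-or-nothing: buffers are allocated one by one with dma_alloc_coherent() (against a NULL device for DMA-engine users, as the dev selection above shows), and the first failure frees everything already allocated and returns an error. A sketch of that rollback shape, with malloc() standing in for the coherent allocator:

    #include <stdio.h>
    #include <stdlib.h>

    #define STAT_MAX_BUFS 5

    /* all-or-nothing pool allocation: on any failure, free what was
     * already allocated and report failure, the shape of
     * isp_stat_bufs_alloc() */
    static int bufs_alloc(void *bufs[STAT_MAX_BUFS], size_t size)
    {
        int i;

        for (i = 0; i < STAT_MAX_BUFS; i++) {
            bufs[i] = malloc(size);
            if (!bufs[i]) {
                while (i--)           /* roll back partial allocation */
                    free(bufs[i]);
                return -1;            /* -ENOMEM in the driver */
            }
        }
        return 0;
    }

    int main(void)
    {
        void *bufs[STAT_MAX_BUFS];
        int i;

        if (bufs_alloc(bufs, 4096) == 0) {
            printf("allocated %d buffers\n", STAT_MAX_BUFS);
            for (i = 0; i < STAT_MAX_BUFS; i++)
                free(bufs[i]);
        }
        return 0;
    }
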
457 static void isp_stat_queue_event(struct ispstat *stat, int err) in isp_stat_queue_event() argument
459 struct video_device *vdev = stat->subdev.devnode; in isp_stat_queue_event()
465 status->frame_number = stat->frame_number; in isp_stat_queue_event()
466 status->config_counter = stat->config_counter; in isp_stat_queue_event()
470 event.type = stat->event_type; in isp_stat_queue_event()
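
isp_stat_queue_event() publishes a completion to userspace by overlaying its status record on the fixed payload area of a struct v4l2_event and queueing it on the subdev's video node with v4l2_event_queue(). A userspace model of filling that payload (the field layout is modelled on struct omap3isp_stat_event_status; the event type value is a placeholder for stat->event_type):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* shape of a V4L2 event: a type code plus a fixed 64-byte payload
     * area that drivers overlay with their own status record */
    struct event_model {
        uint32_t type;
        uint8_t  u_data[64];
    };

    /* layout modelled on struct omap3isp_stat_event_status */
    struct stat_event_status {
        uint32_t frame_number;
        uint16_t config_counter;
        uint8_t  buf_err;
    };

    int main(void)
    {
        struct event_model event;
        struct stat_event_status *status = (void *)event.u_data;

        memset(&event, 0, sizeof(event));
        status->frame_number   = 100;
        status->config_counter = 7;
        status->buf_err        = 1;   /* this frame carries no valid data */
        event.type = 1;               /* placeholder for stat->event_type */

        /* the driver would now call v4l2_event_queue(vdev, &event) */
        printf("event type=%u frame=%u err=%u\n",
               (unsigned)event.type, (unsigned)status->frame_number,
               (unsigned)status->buf_err);
        return 0;
    }
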
481 int omap3isp_stat_request_statistics(struct ispstat *stat, in omap3isp_stat_request_statistics() argument
486 if (stat->state != ISPSTAT_ENABLED) { in omap3isp_stat_request_statistics()
487 dev_dbg(stat->isp->dev, "%s: engine not enabled.\n", in omap3isp_stat_request_statistics()
488 stat->subdev.name); in omap3isp_stat_request_statistics()
492 mutex_lock(&stat->ioctl_lock); in omap3isp_stat_request_statistics()
493 buf = isp_stat_buf_get(stat, data); in omap3isp_stat_request_statistics()
495 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_request_statistics()
506 isp_stat_buf_release(stat); in omap3isp_stat_request_statistics()
507 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_request_statistics()
512 int omap3isp_stat_request_statistics_time32(struct ispstat *stat, in omap3isp_stat_request_statistics_time32() argument
518 ret = omap3isp_stat_request_statistics(stat, &data64); in omap3isp_stat_request_statistics_time32()
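
The _time32 variant exists only for old 32-bit userspace: it runs the native 64-bit request and then narrows the timestamp fields into the legacy layout. A sketch of that narrowing (struct names are illustrative; filling tv_usec from nanoseconds mirrors how the 64-bit path derives it):

    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>

    /* legacy 32-bit timestamp layout, modelled on the time32 uapi struct */
    struct ts32 { int32_t tv_sec; int32_t tv_usec; };

    /* run the native 64-bit request first, then narrow the timestamp
     * for old userspace -- the shape of
     * omap3isp_stat_request_statistics_time32() */
    static void narrow_ts(const struct timespec *ts64, struct ts32 *out)
    {
        out->tv_sec  = (int32_t)ts64->tv_sec;   /* truncates after 2038 */
        out->tv_usec = (int32_t)(ts64->tv_nsec / 1000);
    }

    int main(void)
    {
        struct timespec now;
        struct ts32 old;

        clock_gettime(CLOCK_REALTIME, &now);
        narrow_ts(&now, &old);
        printf("sec=%d usec=%d\n", (int)old.tv_sec, (int)old.tv_usec);
        return 0;
    }
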
537 int omap3isp_stat_config(struct ispstat *stat, void *new_conf) in omap3isp_stat_config() argument
544 mutex_lock(&stat->ioctl_lock); in omap3isp_stat_config()
546 dev_dbg(stat->isp->dev, in omap3isp_stat_config()
548 stat->subdev.name, (unsigned long)buf_size); in omap3isp_stat_config()
550 ret = stat->ops->validate_params(stat, new_conf); in omap3isp_stat_config()
552 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_config()
553 dev_dbg(stat->isp->dev, "%s: configuration values are invalid.\n", in omap3isp_stat_config()
554 stat->subdev.name); in omap3isp_stat_config()
559 dev_dbg(stat->isp->dev, in omap3isp_stat_config()
561 stat->subdev.name, in omap3isp_stat_config()
575 if (IS_H3A(stat)) { in omap3isp_stat_config()
577 if (IS_H3A_AF(stat)) in omap3isp_stat_config()
583 if (stat->recover_priv) { in omap3isp_stat_config()
585 stat->recover_priv; in omap3isp_stat_config()
594 ret = isp_stat_bufs_alloc(stat, buf_size); in omap3isp_stat_config()
596 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_config()
600 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in omap3isp_stat_config()
601 stat->ops->set_params(stat, new_conf); in omap3isp_stat_config()
602 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in omap3isp_stat_config()
608 user_cfg->config_counter = stat->config_counter + stat->inc_config; in omap3isp_stat_config()
611 stat->configured = 1; in omap3isp_stat_config()
612 dev_dbg(stat->isp->dev, in omap3isp_stat_config()
614 stat->subdev.name); in omap3isp_stat_config()
616 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_config()
627 static int isp_stat_buf_process(struct ispstat *stat, int buf_state) in isp_stat_buf_process() argument
631 if (!atomic_add_unless(&stat->buf_err, -1, 0) && in isp_stat_buf_process()
632 buf_state == STAT_BUF_DONE && stat->state == ISPSTAT_ENABLED) { in isp_stat_buf_process()
633 ret = isp_stat_buf_queue(stat); in isp_stat_buf_process()
634 isp_stat_buf_next(stat); in isp_stat_buf_process()
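
isp_stat_buf_process() consumes one "error credit" per frame: atomic_add_unless(&stat->buf_err, -1, 0) decrements buf_err only if it is nonzero, and a successful decrement means the just-completed buffer may be corrupt (omap3isp_stat_sbl_overflow() below sets the counter to 2, since a hardware issue can corrupt two consecutive buffers), so the buffer is dropped instead of queued. A self-contained model of that counter:

    #include <stdio.h>
    #include <stdatomic.h>

    /* decrement *v unless it is already `unless`; return 1 if the
     * decrement happened -- the semantics of atomic_add_unless(v, -1, 0) */
    static int add_unless(atomic_int *v, int add, int unless)
    {
        int old = atomic_load(v);

        while (old != unless)
            if (atomic_compare_exchange_weak(v, &old, old + add))
                return 1;
        return 0;
    }

    int main(void)
    {
        atomic_int buf_err = 2;   /* e.g. set after an SBL overflow */
        int frame;

        for (frame = 0; frame < 4; frame++) {
            int drop = add_unless(&buf_err, -1, 0);
            printf("frame %d: %s\n", frame,
                   drop ? "dropped (possibly corrupt)" : "queued");
        }
        return 0;
    }
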
640 int omap3isp_stat_pcr_busy(struct ispstat *stat) in omap3isp_stat_pcr_busy() argument
642 return stat->ops->busy(stat); in omap3isp_stat_pcr_busy()
645 int omap3isp_stat_busy(struct ispstat *stat) in omap3isp_stat_busy() argument
647 return omap3isp_stat_pcr_busy(stat) | stat->buf_processing | in omap3isp_stat_busy()
648 (stat->state != ISPSTAT_DISABLED); in omap3isp_stat_busy()
658 static void isp_stat_pcr_enable(struct ispstat *stat, u8 pcr_enable) in isp_stat_pcr_enable() argument
660 if ((stat->state != ISPSTAT_ENABLING && in isp_stat_pcr_enable()
661 stat->state != ISPSTAT_ENABLED) && pcr_enable) in isp_stat_pcr_enable()
665 stat->ops->enable(stat, pcr_enable); in isp_stat_pcr_enable()
666 if (stat->state == ISPSTAT_DISABLING && !pcr_enable) in isp_stat_pcr_enable()
667 stat->state = ISPSTAT_DISABLED; in isp_stat_pcr_enable()
668 else if (stat->state == ISPSTAT_ENABLING && pcr_enable) in isp_stat_pcr_enable()
669 stat->state = ISPSTAT_ENABLED; in isp_stat_pcr_enable()
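
isp_stat_pcr_enable() is where the module's state machine settles: the *_ING states mark a request the hardware has not honoured yet, an enable is refused unless the state is ENABLING or ENABLED, and the transition to the settled state happens only when the PCR bit is actually written. A compact model of those transitions (the enum mirrors the driver's ISPSTAT_* states):

    #include <stdio.h>

    enum stat_state {                 /* mirrors the ISPSTAT_* states */
        ISPSTAT_DISABLED,
        ISPSTAT_DISABLING,
        ISPSTAT_ENABLED,
        ISPSTAT_ENABLING,
        ISPSTAT_SUSPENDED,
    };

    /* settle the state only when the PCR enable bit is actually
     * written, as isp_stat_pcr_enable() does */
    static void pcr_enable(enum stat_state *state, int enable)
    {
        if (enable && *state != ISPSTAT_ENABLING && *state != ISPSTAT_ENABLED)
            return;                   /* refuse to enable from a bad state */

        /* the hardware write of the PCR enable bit would go here */

        if (!enable && *state == ISPSTAT_DISABLING)
            *state = ISPSTAT_DISABLED;
        else if (enable && *state == ISPSTAT_ENABLING)
            *state = ISPSTAT_ENABLED;
    }

    int main(void)
    {
        enum stat_state state = ISPSTAT_ENABLING;

        pcr_enable(&state, 1);
        printf("after enable:  %s\n",
               state == ISPSTAT_ENABLED ? "ENABLED" : "other");

        state = ISPSTAT_DISABLING;
        pcr_enable(&state, 0);
        printf("after disable: %s\n",
               state == ISPSTAT_DISABLED ? "DISABLED" : "other");
        return 0;
    }
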
672 void omap3isp_stat_suspend(struct ispstat *stat) in omap3isp_stat_suspend() argument
676 spin_lock_irqsave(&stat->isp->stat_lock, flags); in omap3isp_stat_suspend()
678 if (stat->state != ISPSTAT_DISABLED) in omap3isp_stat_suspend()
679 stat->ops->enable(stat, 0); in omap3isp_stat_suspend()
680 if (stat->state == ISPSTAT_ENABLED) in omap3isp_stat_suspend()
681 stat->state = ISPSTAT_SUSPENDED; in omap3isp_stat_suspend()
683 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in omap3isp_stat_suspend()
686 void omap3isp_stat_resume(struct ispstat *stat) in omap3isp_stat_resume() argument
689 if (stat->state == ISPSTAT_SUSPENDED) in omap3isp_stat_resume()
690 stat->state = ISPSTAT_ENABLING; in omap3isp_stat_resume()
693 static void isp_stat_try_enable(struct ispstat *stat) in isp_stat_try_enable() argument
697 if (stat->priv == NULL) in isp_stat_try_enable()
701 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in isp_stat_try_enable()
702 if (stat->state == ISPSTAT_ENABLING && !stat->buf_processing && in isp_stat_try_enable()
703 stat->buf_alloc_size) { in isp_stat_try_enable()
708 stat->update = 1; in isp_stat_try_enable()
709 isp_stat_buf_next(stat); in isp_stat_try_enable()
710 stat->ops->setup_regs(stat, stat->priv); in isp_stat_try_enable()
711 isp_stat_buf_insert_magic(stat, stat->active_buf); in isp_stat_try_enable()
719 if (!IS_H3A(stat)) in isp_stat_try_enable()
720 atomic_set(&stat->buf_err, 0); in isp_stat_try_enable()
722 isp_stat_pcr_enable(stat, 1); in isp_stat_try_enable()
723 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in isp_stat_try_enable()
724 dev_dbg(stat->isp->dev, "%s: module is enabled.\n", in isp_stat_try_enable()
725 stat->subdev.name); in isp_stat_try_enable()
727 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in isp_stat_try_enable()
731 void omap3isp_stat_isr_frame_sync(struct ispstat *stat) in omap3isp_stat_isr_frame_sync() argument
733 isp_stat_try_enable(stat); in omap3isp_stat_isr_frame_sync()
736 void omap3isp_stat_sbl_overflow(struct ispstat *stat) in omap3isp_stat_sbl_overflow() argument
740 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in omap3isp_stat_sbl_overflow()
745 atomic_set(&stat->buf_err, 2); in omap3isp_stat_sbl_overflow()
753 if (stat->recover_priv) in omap3isp_stat_sbl_overflow()
754 stat->sbl_ovl_recover = 1; in omap3isp_stat_sbl_overflow()
755 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in omap3isp_stat_sbl_overflow()
765 int omap3isp_stat_enable(struct ispstat *stat, u8 enable) in omap3isp_stat_enable() argument
769 dev_dbg(stat->isp->dev, "%s: user wants to %s module.\n", in omap3isp_stat_enable()
770 stat->subdev.name, enable ? "enable" : "disable"); in omap3isp_stat_enable()
773 mutex_lock(&stat->ioctl_lock); in omap3isp_stat_enable()
775 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in omap3isp_stat_enable()
777 if (!stat->configured && enable) { in omap3isp_stat_enable()
778 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in omap3isp_stat_enable()
779 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_enable()
780 dev_dbg(stat->isp->dev, in omap3isp_stat_enable()
782 stat->subdev.name); in omap3isp_stat_enable()
787 if (stat->state == ISPSTAT_DISABLING) in omap3isp_stat_enable()
789 stat->state = ISPSTAT_ENABLED; in omap3isp_stat_enable()
790 else if (stat->state == ISPSTAT_DISABLED) in omap3isp_stat_enable()
792 stat->state = ISPSTAT_ENABLING; in omap3isp_stat_enable()
794 if (stat->state == ISPSTAT_ENABLING) { in omap3isp_stat_enable()
796 stat->state = ISPSTAT_DISABLED; in omap3isp_stat_enable()
797 } else if (stat->state == ISPSTAT_ENABLED) { in omap3isp_stat_enable()
799 stat->state = ISPSTAT_DISABLING; in omap3isp_stat_enable()
800 isp_stat_buf_clear(stat); in omap3isp_stat_enable()
804 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in omap3isp_stat_enable()
805 mutex_unlock(&stat->ioctl_lock); in omap3isp_stat_enable()
812 struct ispstat *stat = v4l2_get_subdevdata(subdev); in omap3isp_stat_s_stream() local
819 isp_stat_try_enable(stat); in omap3isp_stat_s_stream()
823 omap3isp_stat_enable(stat, 0); in omap3isp_stat_s_stream()
824 spin_lock_irqsave(&stat->isp->stat_lock, flags); in omap3isp_stat_s_stream()
825 stat->ops->enable(stat, 0); in omap3isp_stat_s_stream()
826 spin_unlock_irqrestore(&stat->isp->stat_lock, flags); in omap3isp_stat_s_stream()
839 if (!omap3isp_stat_pcr_busy(stat)) in omap3isp_stat_s_stream()
840 omap3isp_stat_isr(stat); in omap3isp_stat_s_stream()
842 dev_dbg(stat->isp->dev, "%s: module is being disabled\n", in omap3isp_stat_s_stream()
843 stat->subdev.name); in omap3isp_stat_s_stream()
852 static void __stat_isr(struct ispstat *stat, int from_dma) in __stat_isr() argument
864 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in __stat_isr()
865 if (stat->state == ISPSTAT_DISABLED) { in __stat_isr()
866 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
869 buf_processing = stat->buf_processing; in __stat_isr()
870 stat->buf_processing = 1; in __stat_isr()
871 stat->ops->enable(stat, 0); in __stat_isr()
874 if (stat->state == ISPSTAT_ENABLED) { in __stat_isr()
875 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
876 dev_err(stat->isp->dev, in __stat_isr()
878 stat->subdev.name); in __stat_isr()
889 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
893 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
896 if (!omap3isp_stat_pcr_busy(stat)) { in __stat_isr()
897 if (!from_dma && stat->ops->buf_process) in __stat_isr()
899 ret = stat->ops->buf_process(stat); in __stat_isr()
904 spin_lock_irqsave(&stat->isp->stat_lock, irqflags); in __stat_isr()
911 if (stat->state == ISPSTAT_DISABLING) { in __stat_isr()
912 stat->state = ISPSTAT_DISABLED; in __stat_isr()
913 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
914 stat->buf_processing = 0; in __stat_isr()
917 pipe = to_isp_pipeline(&stat->subdev.entity); in __stat_isr()
918 stat->frame_number = atomic_read(&pipe->frame_number); in __stat_isr()
925 ret = isp_stat_buf_process(stat, ret); in __stat_isr()
927 if (likely(!stat->sbl_ovl_recover)) { in __stat_isr()
928 stat->ops->setup_regs(stat, stat->priv); in __stat_isr()
935 stat->update = 1; in __stat_isr()
936 stat->ops->setup_regs(stat, stat->recover_priv); in __stat_isr()
937 stat->sbl_ovl_recover = 0; in __stat_isr()
943 stat->update = 1; in __stat_isr()
946 isp_stat_buf_insert_magic(stat, stat->active_buf); in __stat_isr()
959 isp_stat_pcr_enable(stat, 1); in __stat_isr()
960 spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); in __stat_isr()
972 if (stat->ops->buf_process) in __stat_isr()
979 atomic_set(&stat->buf_err, 1); in __stat_isr()
982 dev_dbg(stat->isp->dev, in __stat_isr()
984 stat->subdev.name); in __stat_isr()
988 stat->buf_processing = 0; in __stat_isr()
989 isp_stat_queue_event(stat, ret != STAT_BUF_DONE); in __stat_isr()
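
__stat_isr() can be entered both from the module interrupt and from the DMA-completion path (the from_dma argument), so it opens with a test-and-set of stat->buf_processing under stat_lock: the first instance claims the buffer work, and any racing instance sees the flag already set and backs off. A userspace model of that guard (a mutex stands in for the spinlock):

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int buf_processing;    /* like stat->buf_processing */

    /* test-and-set under the lock: the first ISR instance claims the
     * buffer work; a second one (e.g. the DMA completion racing the
     * PCR interrupt) sees the flag already set and backs off -- the
     * guard __stat_isr() opens with */
    static int claim_buf_work(void)
    {
        int was_processing;

        pthread_mutex_lock(&lock);
        was_processing = buf_processing;
        buf_processing = 1;
        pthread_mutex_unlock(&lock);

        return !was_processing;       /* 1 if we own the work now */
    }

    int main(void)
    {
        printf("first isr claims:  %d\n", claim_buf_work());  /* 1 */
        printf("second isr claims: %d\n", claim_buf_work());  /* 0 */

        buf_processing = 0;           /* cleared at the end of the ISR */
        return 0;
    }
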
992 void omap3isp_stat_isr(struct ispstat *stat) in omap3isp_stat_isr() argument
994 __stat_isr(stat, 0); in omap3isp_stat_isr()
997 void omap3isp_stat_dma_isr(struct ispstat *stat) in omap3isp_stat_dma_isr() argument
999 __stat_isr(stat, 1); in omap3isp_stat_dma_isr()
1006 struct ispstat *stat = v4l2_get_subdevdata(subdev); in omap3isp_stat_subscribe_event() local
1008 if (sub->type != stat->event_type) in omap3isp_stat_subscribe_event()
1021 void omap3isp_stat_unregister_entities(struct ispstat *stat) in omap3isp_stat_unregister_entities() argument
1023 v4l2_device_unregister_subdev(&stat->subdev); in omap3isp_stat_unregister_entities()
1026 int omap3isp_stat_register_entities(struct ispstat *stat, in omap3isp_stat_register_entities() argument
1029 stat->subdev.dev = vdev->mdev->dev; in omap3isp_stat_register_entities()
1031 return v4l2_device_register_subdev(vdev, &stat->subdev); in omap3isp_stat_register_entities()
1034 static int isp_stat_init_entities(struct ispstat *stat, const char *name, in isp_stat_init_entities() argument
1037 struct v4l2_subdev *subdev = &stat->subdev; in isp_stat_init_entities()
1044 v4l2_set_subdevdata(subdev, stat); in isp_stat_init_entities()
1046 stat->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; in isp_stat_init_entities()
1049 return media_entity_pads_init(me, 1, &stat->pad); in isp_stat_init_entities()
1052 int omap3isp_stat_init(struct ispstat *stat, const char *name, in omap3isp_stat_init() argument
1057 stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf), GFP_KERNEL); in omap3isp_stat_init()
1058 if (!stat->buf) in omap3isp_stat_init()
1061 isp_stat_buf_clear(stat); in omap3isp_stat_init()
1062 mutex_init(&stat->ioctl_lock); in omap3isp_stat_init()
1063 atomic_set(&stat->buf_err, 0); in omap3isp_stat_init()
1065 ret = isp_stat_init_entities(stat, name, sd_ops); in omap3isp_stat_init()
1067 mutex_destroy(&stat->ioctl_lock); in omap3isp_stat_init()
1068 kfree(stat->buf); in omap3isp_stat_init()
1074 void omap3isp_stat_cleanup(struct ispstat *stat) in omap3isp_stat_cleanup() argument
1076 media_entity_cleanup(&stat->subdev.entity); in omap3isp_stat_cleanup()
1077 mutex_destroy(&stat->ioctl_lock); in omap3isp_stat_cleanup()
1078 isp_stat_bufs_free(stat); in omap3isp_stat_cleanup()
1079 kfree(stat->buf); in omap3isp_stat_cleanup()
1080 kfree(stat->priv); in omap3isp_stat_cleanup()
1081 kfree(stat->recover_priv); in omap3isp_stat_cleanup()