// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019 Rockchip Electronics Co., Ltd */

#include <media/videobuf2-dma-contig.h>
#include <linux/delay.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include "dev.h"
#include "regs.h"
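
/*
 * Register access helpers. Every write lands in a software shadow copy at
 * dev->sw_base_addr and is tagged SW_REG_CACHE in a flag area that sits
 * RKISP_ISPP_SW_REG_SIZE bytes above it; the hardware register is only
 * written immediately when a single ispp instance owns the hardware.
 * With multiple instances, rkispp_update_regs() below replays the cached
 * values once the device gets its turn on the hardware.
 *
 * Minimal usage sketch (RKISPP_EXAMPLE_CTRL is a made-up register offset,
 * not one defined in regs.h):
 *
 *	rkispp_write(dev, RKISPP_EXAMPLE_CTRL, 0x1);
 *	rkispp_set_bits(dev, RKISPP_EXAMPLE_CTRL, GENMASK(3, 0), 0x3);
 *	rkispp_update_regs(dev, RKISPP_EXAMPLE_CTRL, RKISPP_EXAMPLE_CTRL);
 */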
void rkispp_write(struct rkispp_device *dev, u32 reg, u32 val)
{
	u32 *mem = dev->sw_base_addr + reg;
	u32 *flag = dev->sw_base_addr + reg + RKISP_ISPP_SW_REG_SIZE;

	*mem = val;
	*flag = SW_REG_CACHE;
	if (dev->hw_dev->is_single)
		writel(val, dev->hw_dev->base_addr + reg);
}

u32 rkispp_read(struct rkispp_device *dev, u32 reg)
{
	u32 val;

	if (dev->hw_dev->is_single)
		val = readl(dev->hw_dev->base_addr + reg);
	else
		val = *(u32 *)(dev->sw_base_addr + reg);
	return val;
}

void rkispp_set_bits(struct rkispp_device *dev, u32 reg, u32 mask, u32 val)
{
	u32 tmp = rkispp_read(dev, reg) & ~mask;

	rkispp_write(dev, reg, val | tmp);
}

void rkispp_clear_bits(struct rkispp_device *dev, u32 reg, u32 mask)
{
	u32 tmp = rkispp_read(dev, reg);

	rkispp_write(dev, reg, tmp & ~mask);
}
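
/*
 * Flush the shadow registers in [start, end] to the hardware. Only words
 * whose flag is still SW_REG_CACHE are replayed, so registers the hardware
 * has already seen (or that were never written) are skipped.
 */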
void rkispp_update_regs(struct rkispp_device *dev, u32 start, u32 end)
{
	void __iomem *base = dev->hw_dev->base_addr;
	u32 i;

	if (end > RKISP_ISPP_SW_REG_SIZE - 4) {
		dev_err(dev->dev, "%s out of range\n", __func__);
		return;
	}
	for (i = start; i <= end; i += 4) {
		u32 *val = dev->sw_base_addr + i;
		u32 *flag = dev->sw_base_addr + i + RKISP_ISPP_SW_REG_SIZE;

		if (*flag == SW_REG_CACHE)
			writel(*val, base + i);
	}
}
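
/*
 * Allocate an internal, driver-owned buffer through the vb2 memory ops of
 * the hardware device. In spite of its name this is an allocation helper:
 * it page-aligns buf->size, allocates with the DMA attributes the platform
 * needs, and optionally exposes a kernel vaddr, a dma-buf and a dma-buf fd
 * depending on the is_need_* flags set by the caller.
 *
 * A minimal sketch of the expected calling pattern (field values are
 * illustrative only):
 *
 *	struct rkispp_dummy_buffer buf = { 0 };
 *
 *	buf.size = 4096;
 *	buf.is_need_vaddr = true;
 *	if (!rkispp_allow_buffer(dev, &buf))
 *		memset(buf.vaddr, 0, buf.size);
 */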
int rkispp_allow_buffer(struct rkispp_device *dev,
			struct rkispp_dummy_buffer *buf)
{
	unsigned long attrs = buf->is_need_vaddr ? 0 : DMA_ATTR_NO_KERNEL_MAPPING;
	const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;
	struct sg_table *sg_tbl;
	void *mem_priv;
	int ret = 0;

	if (!buf->size) {
		ret = -EINVAL;
		goto err;
	}

	if (dev->hw_dev->is_dma_contig)
		attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
	buf->size = PAGE_ALIGN(buf->size);
	mem_priv = g_ops->alloc(dev->hw_dev->dev, attrs, buf->size,
				DMA_BIDIRECTIONAL, GFP_KERNEL | GFP_DMA32);
	if (IS_ERR_OR_NULL(mem_priv)) {
		ret = -ENOMEM;
		goto err;
	}

	buf->mem_priv = mem_priv;
	if (dev->hw_dev->is_dma_sg_ops) {
		sg_tbl = (struct sg_table *)g_ops->cookie(mem_priv);
		buf->dma_addr = sg_dma_address(sg_tbl->sgl);
		g_ops->prepare(mem_priv);
	} else {
		buf->dma_addr = *((dma_addr_t *)g_ops->cookie(mem_priv));
	}
	if (buf->is_need_vaddr)
		buf->vaddr = g_ops->vaddr(mem_priv);
	if (buf->is_need_dbuf) {
		buf->dbuf = g_ops->get_dmabuf(mem_priv, O_RDWR);
		if (buf->is_need_dmafd) {
			buf->dma_fd = dma_buf_fd(buf->dbuf, O_CLOEXEC);
			if (buf->dma_fd < 0) {
				dma_buf_put(buf->dbuf);
				ret = buf->dma_fd;
				goto err;
			}
			get_dma_buf(buf->dbuf);
		}
	}
	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
		 "%s buf:0x%x~0x%x size:%d\n", __func__,
		 (u32)buf->dma_addr, (u32)buf->dma_addr + buf->size, buf->size);
	return ret;
err:
	dev_err(dev->dev, "%s failed ret:%d\n", __func__, ret);
	return ret;
}

void rkispp_free_buffer(struct rkispp_device *dev,
			struct rkispp_dummy_buffer *buf)
{
	const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;

	if (buf && buf->mem_priv) {
		v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
			 "%s buf:0x%x~0x%x\n", __func__,
			 (u32)buf->dma_addr, (u32)buf->dma_addr + buf->size);
		if (buf->dbuf)
			dma_buf_put(buf->dbuf);
		g_ops->put(buf->mem_priv);
		buf->size = 0;
		buf->dbuf = NULL;
		buf->vaddr = NULL;
		buf->mem_priv = NULL;
		buf->is_need_dbuf = false;
		buf->is_need_vaddr = false;
		buf->is_need_dmafd = false;
	}
}

void rkispp_prepare_buffer(struct rkispp_device *dev,
			   struct rkispp_dummy_buffer *buf)
{
	const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;

	if (buf && buf->mem_priv)
		g_ops->prepare(buf->mem_priv);
}

void rkispp_finish_buffer(struct rkispp_device *dev,
			  struct rkispp_dummy_buffer *buf)
{
	const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;

	if (buf && buf->mem_priv)
		g_ops->finish(buf->mem_priv);
}
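
/*
 * Bind this (virtual) ispp device to the shared hardware device referenced
 * by the "rockchip,hw" phandle in its DT node. Each attached instance gets
 * a slot in hw->ispp[] and its own dev_id; as soon as more than one
 * instance is registered, is_single is cleared and register writes go
 * through the shadow-copy path above.
 */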
int rkispp_attach_hw(struct rkispp_device *ispp)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct rkispp_hw_dev *hw;

	np = of_parse_phandle(ispp->dev->of_node, "rockchip,hw", 0);
	if (!np || !of_device_is_available(np)) {
		dev_err(ispp->dev, "failed to get ispp hw node\n");
		return -ENODEV;
	}

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev) {
		dev_err(ispp->dev, "failed to get ispp hw from node\n");
		return -ENODEV;
	}

	hw = platform_get_drvdata(pdev);
	if (!hw) {
		dev_err(ispp->dev, "failed to attach ispp hw\n");
		return -EINVAL;
	}

	if (hw->dev_num)
		hw->is_single = false;
	ispp->dev_id = hw->dev_num;
	hw->ispp[hw->dev_num] = ispp;
	hw->dev_num++;
	ispp->hw_dev = hw;
	ispp->ispp_ver = hw->ispp_ver;

	return 0;
}
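
/*
 * The reg_buf ring holds RKISP_ISPP_REGBUF_NUM register-dump slots used
 * when "reg withstream" debugging is enabled; each slot records the device
 * id, frame id and timestamps it was captured for, plus an ISP_ISPP_FREE /
 * ISP_ISPP_INUSE state so slots can be recycled per frame.
 */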
static int rkispp_init_regbuf(struct rkispp_hw_dev *hw)
{
	struct rkisp_ispp_reg *reg_buf;
	u32 i, buf_size;

	if (!rkispp_is_reg_withstream_global()) {
		hw->reg_buf = NULL;
		return 0;
	}

	buf_size = RKISP_ISPP_REGBUF_NUM * sizeof(struct rkisp_ispp_reg);
	hw->reg_buf = vmalloc(buf_size);
	if (!hw->reg_buf)
		return -ENOMEM;

	reg_buf = hw->reg_buf;
	for (i = 0; i < RKISP_ISPP_REGBUF_NUM; i++) {
		reg_buf[i].stat = ISP_ISPP_FREE;
		reg_buf[i].dev_id = 0xFF;
		reg_buf[i].frame_id = 0;
		reg_buf[i].reg_size = 0;
		reg_buf[i].sof_timestamp = 0LL;
		reg_buf[i].frame_timestamp = 0LL;
	}

	return 0;
}

static void rkispp_free_regbuf(struct rkispp_hw_dev *hw)
{
	if (hw->reg_buf) {
		vfree(hw->reg_buf);
		hw->reg_buf = NULL;
	}
}

static int rkispp_find_regbuf_by_stat(struct rkispp_hw_dev *hw,
				      struct rkisp_ispp_reg **free_buf,
				      enum rkisp_ispp_reg_stat stat)
{
	struct rkisp_ispp_reg *reg_buf = hw->reg_buf;
	int i = 0, ret;

	*free_buf = NULL;
	if (!hw->reg_buf || !rkispp_reg_withstream)
		return -EINVAL;

	for (i = 0; i < RKISP_ISPP_REGBUF_NUM; i++) {
		if (reg_buf[i].stat == stat)
			break;
	}

	ret = -ENODATA;
	if (i < RKISP_ISPP_REGBUF_NUM) {
		ret = 0;
		*free_buf = &reg_buf[i];
	}

	return ret;
}

static void rkispp_free_pool(struct rkispp_hw_dev *hw)
{
	const struct vb2_mem_ops *g_ops = hw->mem_ops;
	struct rkispp_isp_buf_pool *buf;
	int i, j;

	if (atomic_read(&hw->refcnt))
		return;

	for (i = 0; i < RKISPP_BUF_POOL_MAX; i++) {
		buf = &hw->pool[i];
		if (!buf->dbufs)
			break;
		if (rkispp_debug)
			dev_info(hw->dev, "%s dbufs[%d]:0x%p\n",
				 __func__, i, buf->dbufs);
		for (j = 0; j < hw->pool[0].group_buf_max; j++) {
			if (buf->mem_priv[j]) {
				g_ops->unmap_dmabuf(buf->mem_priv[j]);
				g_ops->detach_dmabuf(buf->mem_priv[j]);
				dma_buf_put(buf->dbufs->dbuf[j]);
				buf->mem_priv[j] = NULL;
			}
		}
		buf->dbufs = NULL;
	}

	rkispp_free_regbuf(hw);
	hw->is_idle = true;
}
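
/*
 * Import a group of dma-bufs handed over by the ISP driver into a free
 * pool slot: attach and map each dma-buf via the vb2 memory ops, record
 * its dma address and vaddr, and take an extra reference that is dropped
 * again in rkispp_free_pool(). The code assumes a free slot (pool->dbufs
 * == NULL) is always available when it is called.
 */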
static int rkispp_init_pool(struct rkispp_hw_dev *hw, struct rkisp_ispp_buf *dbufs)
{
	const struct vb2_mem_ops *g_ops = hw->mem_ops;
	struct rkispp_isp_buf_pool *pool;
	struct sg_table *sg_tbl;
	int i, ret = 0;
	void *mem;

	INIT_LIST_HEAD(&hw->list);
	/* find a free slot in the dma buf pool */
	for (i = 0; i < RKISPP_BUF_POOL_MAX; i++) {
		pool = &hw->pool[i];
		if (!pool->dbufs)
			break;
	}
	dbufs->is_isp = true;
	pool->dbufs = dbufs;
	if (rkispp_debug)
		dev_info(hw->dev, "%s dbufs[%d]:0x%p\n",
			 __func__, i, dbufs);
	for (i = 0; i < hw->pool[0].group_buf_max; i++) {
		mem = g_ops->attach_dmabuf(hw->dev, dbufs->dbuf[i],
					   dbufs->dbuf[i]->size, DMA_BIDIRECTIONAL);
		if (IS_ERR(mem)) {
			ret = PTR_ERR(mem);
			goto err;
		}
		pool->mem_priv[i] = mem;
		ret = g_ops->map_dmabuf(mem);
		if (ret)
			goto err;
		if (hw->is_dma_sg_ops) {
			sg_tbl = (struct sg_table *)g_ops->cookie(mem);
			pool->dma[i] = sg_dma_address(sg_tbl->sgl);
		} else {
			pool->dma[i] = *((dma_addr_t *)g_ops->cookie(mem));
		}
		get_dma_buf(dbufs->dbuf[i]);
		pool->vaddr[i] = g_ops->vaddr(mem);
		if (rkispp_debug)
			dev_info(hw->dev, "%s dma[%d]:0x%x\n",
				 __func__, i, (u32)pool->dma[i]);
	}
	rkispp_init_regbuf(hw);
	hw->is_idle = true;
	return ret;
err:
	rkispp_free_pool(hw);
	return ret;
}
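
/*
 * Simple scheduler for the shared hardware: if the ispp core is idle the
 * incoming buffer (or the oldest queued one) is dispatched immediately to
 * the first enabled module in the TNR -> NR -> FEC pipeline; otherwise the
 * buffer is queued until the current owner finishes. A NULL dbufs marks
 * the hardware idle so the next queued buffer can run.
 */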
static void rkispp_queue_dmabuf(struct rkispp_hw_dev *hw, struct rkisp_ispp_buf *dbufs)
{
	struct list_head *list = &hw->list;
	struct rkispp_device *ispp;
	struct rkispp_stream_vdev *vdev;
	struct rkisp_ispp_buf *buf = NULL;
	unsigned long lock_flags = 0;
	u32 val;

	spin_lock_irqsave(&hw->buf_lock, lock_flags);
	if (!dbufs)
		hw->is_idle = true;
	if (hw->is_shutdown)
		hw->is_idle = false;
	if (dbufs && list_empty(list) && hw->is_idle) {
		/* ispp idle, or handling the same device */
		buf = dbufs;
	} else if (hw->is_idle && !list_empty(list)) {
		/* ispp idle: handle the first buf in the list */
		buf = list_first_entry(list, struct rkisp_ispp_buf, list);
		list_del(&buf->list);
		if (dbufs)
			list_add_tail(&dbufs->list, list);
	} else if (dbufs) {
		/* new buf queued, waiting to be handled */
		list_add_tail(&dbufs->list, list);
	}

	if (buf) {
		hw->is_idle = false;
		hw->cur_dev_id = buf->index;
		ispp = hw->ispp[buf->index];
		vdev = &ispp->stream_vdev;
		val = (vdev->module_ens & ISPP_MODULE_TNR) ? ISPP_MODULE_TNR :
		      ((vdev->module_ens & ISPP_MODULE_NR) ? ISPP_MODULE_NR :
		       ISPP_MODULE_FEC);
		ispp->params_vdev.params_ops->rkispp_params_cfg(&ispp->params_vdev,
								buf->frame_id);
		vdev->stream_ops->rkispp_module_work_event(ispp, buf, NULL, val, false);
	}

	spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
}
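
/*
 * Entry point for events from the stream/params code and from the ISP
 * driver. A sketch of the expected usage for CMD_STREAM (the argument is
 * a pointer to a nonzero int to start streaming, zero to stop):
 *
 *	int on = 1;
 *
 *	rkispp_event_handle(ispp, CMD_STREAM, &on);
 */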
int rkispp_event_handle(struct rkispp_device *ispp, u32 cmd, void *arg)
{
	struct rkispp_hw_dev *hw = ispp->hw_dev;
	int ret = 0;

	switch (cmd) {
	case CMD_STREAM:
		if (*(int *)arg)
			atomic_inc(&hw->refcnt);
		else
			atomic_dec(&hw->refcnt);
		break;
	case CMD_INIT_POOL:
		ret = rkispp_init_pool(hw, arg);
		break;
	case CMD_FREE_POOL:
		rkispp_free_pool(hw);
		break;
	case CMD_QUEUE_DMABUF:
		rkispp_queue_dmabuf(hw, arg);
		break;
	default:
		ret = -EFAULT;
	}

	return ret;
}
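
/*
 * With an IOMMU present, build the dummy buffer from a single physical
 * page mapped repeatedly: every entry of the page array points at the same
 * page, so unused hardware outputs get a valid IOVA of the requested size
 * while consuming only one page of real memory.
 */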
static int rkispp_alloc_page_dummy_buf(struct rkispp_device *dev, u32 size)
{
	struct rkispp_hw_dev *hw = dev->hw_dev;
	struct rkispp_dummy_buffer *dummy_buf = &hw->dummy_buf;
	u32 i, n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;
	struct sg_table *sg = NULL;
	int ret = -ENOMEM;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 0);
	if (!page)
		goto err;

	pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto free_page;
	/* every slot references the same physical page */
	for (i = 0; i < n_pages; i++)
		pages[i] = page;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg)
		goto free_pages;
	ret = sg_alloc_table_from_pages(sg, pages, n_pages, 0,
					n_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto free_sg;

	ret = dma_map_sg(hw->dev, sg->sgl, sg->nents, DMA_BIDIRECTIONAL);
	dummy_buf->dma_addr = sg_dma_address(sg->sgl);
	dummy_buf->mem_priv = sg;
	dummy_buf->pages = pages;
	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
		 "%s buf:0x%x map cnt:%d\n", __func__,
		 (u32)dummy_buf->dma_addr, ret);
	return 0;
free_sg:
	kfree(sg);
free_pages:
	kvfree(pages);
free_page:
	__free_pages(page, 0);
err:
	return ret;
}

static void rkispp_free_page_dummy_buf(struct rkispp_device *dev)
{
	struct rkispp_dummy_buffer *dummy_buf = &dev->hw_dev->dummy_buf;
	struct sg_table *sg = dummy_buf->mem_priv;

	if (!sg)
		return;
	dma_unmap_sg(dev->hw_dev->dev, sg->sgl, sg->nents, DMA_BIDIRECTIONAL);
	sg_free_table(sg);
	kfree(sg);
	__free_pages(dummy_buf->pages[0], 0);
	kvfree(dummy_buf->pages);
	dummy_buf->mem_priv = NULL;
	dummy_buf->pages = NULL;
}
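
/*
 * Allocate the dummy buffer shared by all ispp instances, sized for the
 * largest expected input (width * height * 2 bytes per pixel). The
 * page-based variant above is used when an IOMMU is available; otherwise
 * a regular contiguous buffer is allocated.
 */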
int rkispp_alloc_common_dummy_buf(struct rkispp_device *dev)
{
	struct rkispp_hw_dev *hw = dev->hw_dev;
	struct rkispp_subdev *sdev = &dev->ispp_sdev;
	struct rkispp_dummy_buffer *dummy_buf = &hw->dummy_buf;
	u32 w = hw->max_in.w ? hw->max_in.w : sdev->out_fmt.width;
	u32 h = hw->max_in.h ? hw->max_in.h : sdev->out_fmt.height;
	u32 size = w * h * 2;
	int ret = 0;

	mutex_lock(&hw->dev_lock);
	if (dummy_buf->mem_priv)
		goto end;

	if (hw->is_mmu) {
		ret = rkispp_alloc_page_dummy_buf(dev, size);
		goto end;
	}

	dummy_buf->size = size;
	ret = rkispp_allow_buffer(dev, dummy_buf);
	if (!ret)
		v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
			 "%s buf:0x%x size:%d\n", __func__,
			 (u32)dummy_buf->dma_addr, dummy_buf->size);
end:
	if (ret < 0)
		v4l2_err(&dev->v4l2_dev, "%s failed:%d\n", __func__, ret);
	mutex_unlock(&hw->dev_lock);
	return ret;
}

void rkispp_free_common_dummy_buf(struct rkispp_device *dev)
{
	struct rkispp_hw_dev *hw = dev->hw_dev;

	mutex_lock(&hw->dev_lock);
	if (atomic_read(&hw->refcnt) ||
	    atomic_read(&dev->stream_vdev.refcnt) > 1)
		goto end;
	if (hw->is_mmu)
		rkispp_free_page_dummy_buf(dev);
	else
		rkispp_free_buffer(dev, &hw->dummy_buf);
end:
	mutex_unlock(&hw->dev_lock);
}

int rkispp_find_regbuf_by_id(struct rkispp_device *ispp, struct rkisp_ispp_reg **free_buf,
			     u32 dev_id, u32 frame_id)
{
	struct rkispp_hw_dev *hw = ispp->hw_dev;
	struct rkisp_ispp_reg *reg_buf = hw->reg_buf;
	int i = 0, ret;

	*free_buf = NULL;
	if (!hw->reg_buf)
		return -EINVAL;

	for (i = 0; i < RKISP_ISPP_REGBUF_NUM; i++) {
		if (reg_buf[i].dev_id == dev_id && reg_buf[i].frame_id == frame_id)
			break;
	}

	ret = -ENODATA;
	if (i < RKISP_ISPP_REGBUF_NUM) {
		ret = 0;
		*free_buf = &reg_buf[i];
	}

	return ret;
}

void rkispp_release_regbuf(struct rkispp_device *ispp, struct rkisp_ispp_reg *freebuf)
{
	struct rkispp_hw_dev *hw = ispp->hw_dev;
	struct rkisp_ispp_reg *reg_buf = hw->reg_buf;
	int i;

	if (!hw->reg_buf)
		return;

	/* free all older slots belonging to the same device */
	for (i = 0; i < RKISP_ISPP_REGBUF_NUM; i++) {
		if (reg_buf[i].dev_id == freebuf->dev_id &&
		    reg_buf[i].frame_timestamp < freebuf->frame_timestamp) {
			reg_buf[i].frame_id = 0;
			reg_buf[i].stat = ISP_ISPP_FREE;
		}
	}
}

void rkispp_request_regbuf(struct rkispp_device *dev, struct rkisp_ispp_reg **free_buf)
{
	struct rkispp_hw_dev *hw = dev->hw_dev;
	int ret;

	if (!hw->reg_buf) {
		*free_buf = NULL;
		return;
	}

	ret = rkispp_find_regbuf_by_stat(hw, free_buf, ISP_ISPP_FREE);
	if (!ret)
		(*free_buf)->stat = ISP_ISPP_INUSE;
}

bool rkispp_is_reg_withstream_global(void)
{
	return rkispp_reg_withstream;
}

bool rkispp_is_reg_withstream_local(struct device *dev)
{
	const char *node_name = dev_name(dev);

	if (!node_name)
		return false;

	return !memcmp(rkispp_reg_withstream_video_name, node_name,
		       strlen(node_name));
}