// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Rockchip Electronics Co., Ltd */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/of_platform.h>
#include <linux/scatterlist.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-dma-sg.h>
#include "dev.h"
#include "common.h"

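/*
 * Allocate a dummy buffer through the hardware node's vb2 mem_ops,
 * optionally creating a kernel mapping and exporting the allocation as a
 * dma-buf and/or a file descriptor, depending on the is_need_* flags.
 */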
int rkcif_alloc_buffer(struct rkcif_device *dev,
		       struct rkcif_dummy_buffer *buf)
{
	/* Skip the kernel mapping unless the caller needs a CPU vaddr */
	unsigned long attrs = buf->is_need_vaddr ? 0 : DMA_ATTR_NO_KERNEL_MAPPING;
	const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;
	struct sg_table *sg_tbl;
	void *mem_priv;
	int ret = 0;

	if (!buf->size) {
		ret = -EINVAL;
		goto err;
	}

	if (dev->hw_dev->is_dma_contig)
		attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
	buf->size = PAGE_ALIGN(buf->size);
	mem_priv = g_ops->alloc(dev->hw_dev->dev, attrs, buf->size,
				DMA_BIDIRECTIONAL, GFP_KERNEL | GFP_DMA32);
	if (IS_ERR_OR_NULL(mem_priv)) {
		ret = -ENOMEM;
		goto err;
	}

	buf->mem_priv = mem_priv;
	if (dev->hw_dev->is_dma_sg_ops) {
		/* The dma-sg allocator's cookie is the buffer's sg_table */
		sg_tbl = (struct sg_table *)g_ops->cookie(mem_priv);
		buf->dma_addr = sg_dma_address(sg_tbl->sgl);
		g_ops->prepare(mem_priv);
	} else {
		/* The dma-contig allocator's cookie is the DMA address */
		buf->dma_addr = *((dma_addr_t *)g_ops->cookie(mem_priv));
	}
	if (buf->is_need_vaddr)
		buf->vaddr = g_ops->vaddr(mem_priv);
	if (buf->is_need_dbuf) {
		buf->dbuf = g_ops->get_dmabuf(mem_priv, O_RDWR);
		if (buf->is_need_dmafd) {
			buf->dma_fd = dma_buf_fd(buf->dbuf, O_CLOEXEC);
			if (buf->dma_fd < 0) {
				dma_buf_put(buf->dbuf);
				buf->dbuf = NULL;
				ret = buf->dma_fd;
				goto err;
			}
			/*
			 * The installed fd consumes one reference, so take
			 * another to keep buf->dbuf valid on our side.
			 */
			get_dma_buf(buf->dbuf);
		}
	}
	v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
		 "%s buf:0x%x~0x%x size:%d\n", __func__,
		 (u32)buf->dma_addr, (u32)buf->dma_addr + buf->size, buf->size);
	return ret;
err:
	dev_err(dev->dev, "%s failed ret:%d\n", __func__, ret);
	return ret;
}

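/*
 * Release a buffer obtained from rkcif_alloc_buffer() and reset the
 * bookkeeping fields so the descriptor can be reused.
 */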
void rkcif_free_buffer(struct rkcif_device *dev,
		       struct rkcif_dummy_buffer *buf)
{
	const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;

	if (buf && buf->mem_priv) {
		v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
			 "%s buf:0x%x~0x%x\n", __func__,
			 (u32)buf->dma_addr, (u32)buf->dma_addr + buf->size);
		if (buf->dbuf)
			dma_buf_put(buf->dbuf);
		g_ops->put(buf->mem_priv);
		buf->size = 0;
		buf->dbuf = NULL;
		buf->vaddr = NULL;
		buf->mem_priv = NULL;
		buf->is_need_dbuf = false;
		buf->is_need_vaddr = false;
		buf->is_need_dmafd = false;
	}
}

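/*
 * Build a dummy buffer for the IOMMU case from a single physical page:
 * the same page is repeated n_pages times in an sg table, so the device
 * sees a mapping of the requested size while only one page of memory is
 * actually consumed. All device writes land in that one scratch page.
 */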
static int rkcif_alloc_page_dummy_buf(struct rkcif_device *dev, struct rkcif_dummy_buffer *buf)
{
	struct rkcif_hw *hw = dev->hw_dev;
	u32 i, n_pages = PAGE_ALIGN(buf->size) >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;
	struct sg_table *sg = NULL;
	int ret = -ENOMEM;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 0);
	if (!page)
		goto err;

	/* Point every entry of the page array at the one scratch page */
	pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto free_page;
	for (i = 0; i < n_pages; i++)
		pages[i] = page;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg)
		goto free_pages;
	ret = sg_alloc_table_from_pages(sg, pages, n_pages, 0,
					n_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto free_sg;

	/* dma_map_sg() returns the number of mapped entries, 0 on failure */
	ret = dma_map_sg(hw->dev, sg->sgl, sg->nents, DMA_BIDIRECTIONAL);
	if (!ret) {
		ret = -EIO;
		goto free_table;
	}
	buf->dma_addr = sg_dma_address(sg->sgl);
	buf->mem_priv = sg;
	buf->pages = pages;
	v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
		 "%s buf:0x%x map cnt:%d size:%d\n", __func__,
		 (u32)buf->dma_addr, ret, buf->size);
	return 0;
free_table:
	sg_free_table(sg);
free_sg:
	kfree(sg);
free_pages:
	kvfree(pages);
free_page:
	__free_pages(page, 0);
err:
	return ret;
}

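/*
 * Tear down a buffer created by rkcif_alloc_page_dummy_buf(): unmap and
 * free the sg table, then release the single backing page.
 */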
static void rkcif_free_page_dummy_buf(struct rkcif_device *dev, struct rkcif_dummy_buffer *buf)
{
	struct sg_table *sg = buf->mem_priv;

	if (!sg)
		return;
	dma_unmap_sg(dev->hw_dev->dev, sg->sgl, sg->nents, DMA_BIDIRECTIONAL);
	sg_free_table(sg);
	kfree(sg);
	__free_pages(buf->pages[0], 0);
	kvfree(buf->pages);
	buf->mem_priv = NULL;
	buf->pages = NULL;
}

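/*
 * Allocate the common dummy buffer under the hardware lock. With an
 * IOMMU the cheap single-page variant backs the mapping; otherwise a
 * real buffer is allocated through the vb2 mem_ops.
 */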
int rkcif_alloc_common_dummy_buf(struct rkcif_device *dev, struct rkcif_dummy_buffer *buf)
{
	struct rkcif_hw *hw = dev->hw_dev;
	int ret = 0;

	mutex_lock(&hw->dev_lock);
	if (buf->mem_priv)
		goto end;

	if (buf->size == 0)
		goto end;

	if (hw->iommu_en) {
		ret = rkcif_alloc_page_dummy_buf(dev, buf);
		goto end;
	}

	ret = rkcif_alloc_buffer(dev, buf);
	if (!ret)
		v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
			 "%s buf:0x%x size:%d\n", __func__,
			 (u32)buf->dma_addr, buf->size);
end:
	if (ret < 0)
		v4l2_err(&dev->v4l2_dev, "%s failed:%d\n", __func__, ret);
	mutex_unlock(&hw->dev_lock);
	return ret;
}

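/*
 * Free the common dummy buffer, taking the same hardware lock and the
 * same IOMMU/non-IOMMU split as the allocation path.
 */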
void rkcif_free_common_dummy_buf(struct rkcif_device *dev, struct rkcif_dummy_buffer *buf)
{
	struct rkcif_hw *hw = dev->hw_dev;

	mutex_lock(&hw->dev_lock);

	if (hw->iommu_en)
		rkcif_free_page_dummy_buf(dev, buf);
	else
		rkcif_free_buffer(dev, buf);
	mutex_unlock(&hw->dev_lock);
}