1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2019 Rockchip Electronics Co., Ltd */
3
4 #include <media/videobuf2-dma-contig.h>
5 #include <media/videobuf2-dma-sg.h>
6 #include <linux/of_platform.h>
7 #include <linux/slab.h>
8 #include "dev.h"
9 #include "isp_ispp.h"
10 #include "regs.h"
11
/*
 * rkisp_write - write an ISP register through the shadow cache.
 *
 * The value always lands in the software shadow copy and the register is
 * flagged SW_REG_CACHE (dirty).  When the hardware is owned by a single
 * device, or the caller forces a direct write, the value is also pushed to
 * the real register and the flag upgraded to SW_REG_CACHE_SYNC.
 */
void rkisp_write(struct rkisp_device *dev, u32 reg, u32 val, bool is_direct)
{
	u32 *cache = dev->sw_base_addr + reg;
	u32 *state = dev->sw_base_addr + reg + RKISP_ISP_SW_REG_SIZE;

	*cache = val;
	*state = SW_REG_CACHE;
	if (is_direct || dev->hw_dev->is_single) {
		*state = SW_REG_CACHE_SYNC;
		writel(val, dev->hw_dev->base_addr + reg);
	}
}
24
/*
 * rkisp_next_write - like rkisp_write() but for the "next" ISP instance.
 *
 * The shadow copy of the second instance lives RKISP_ISP_SW_MAX_SIZE bytes
 * above the first one; direct writes go to base_next_addr.
 */
void rkisp_next_write(struct rkisp_device *dev, u32 reg, u32 val, bool is_direct)
{
	u32 sw_reg = RKISP_ISP_SW_MAX_SIZE + reg;
	u32 *cache = dev->sw_base_addr + sw_reg;
	u32 *state = dev->sw_base_addr + sw_reg + RKISP_ISP_SW_REG_SIZE;

	*cache = val;
	*state = SW_REG_CACHE;
	if (is_direct || dev->hw_dev->is_single) {
		*state = SW_REG_CACHE_SYNC;
		writel(val, dev->hw_dev->base_next_addr + reg);
	}
}
38
/*
 * rkisp_read - read an ISP register.
 *
 * Reads the real hardware register when the device owns the hardware
 * (is_single) or the caller asks for a direct read; otherwise returns the
 * software shadow copy.
 */
u32 rkisp_read(struct rkisp_device *dev, u32 reg, bool is_direct)
{
	if (is_direct || dev->hw_dev->is_single)
		return readl(dev->hw_dev->base_addr + reg);

	return *(u32 *)(dev->sw_base_addr + reg);
}
49
/*
 * rkisp_next_read - like rkisp_read() but for the "next" ISP instance.
 */
u32 rkisp_next_read(struct rkisp_device *dev, u32 reg, bool is_direct)
{
	if (is_direct || dev->hw_dev->is_single)
		return readl(dev->hw_dev->base_next_addr + reg);

	return *(u32 *)(dev->sw_base_addr + RKISP_ISP_SW_MAX_SIZE + reg);
}
60
/* Return the shadow (cached) value of @reg without touching hardware. */
u32 rkisp_read_reg_cache(struct rkisp_device *dev, u32 reg)
{
	u32 *cache = dev->sw_base_addr + reg;

	return *cache;
}
65
/* Shadow-only read of @reg for the "next" ISP instance. */
u32 rkisp_next_read_reg_cache(struct rkisp_device *dev, u32 reg)
{
	u32 *cache = dev->sw_base_addr + RKISP_ISP_SW_MAX_SIZE + reg;

	return *cache;
}
70
/*
 * rkisp_set_bits - read-modify-write: clear @mask then OR in @val.
 */
void rkisp_set_bits(struct rkisp_device *dev, u32 reg, u32 mask, u32 val, bool is_direct)
{
	u32 cur = rkisp_read(dev, reg, is_direct);

	rkisp_write(dev, reg, (cur & ~mask) | val, is_direct);
}
77
/*
 * rkisp_next_set_bits - read-modify-write on the "next" ISP instance.
 */
void rkisp_next_set_bits(struct rkisp_device *dev, u32 reg, u32 mask, u32 val, bool is_direct)
{
	u32 cur = rkisp_next_read(dev, reg, is_direct);

	rkisp_next_write(dev, reg, (cur & ~mask) | val, is_direct);
}
84
/*
 * rkisp_clear_bits - clear the bits in @mask of register @reg.
 */
void rkisp_clear_bits(struct rkisp_device *dev, u32 reg, u32 mask, bool is_direct)
{
	u32 cur = rkisp_read(dev, reg, is_direct);

	rkisp_write(dev, reg, cur & ~mask, is_direct);
}
91
/*
 * rkisp_next_clear_bits - clear @mask bits on the "next" ISP instance.
 */
void rkisp_next_clear_bits(struct rkisp_device *dev, u32 reg, u32 mask, bool is_direct)
{
	u32 cur = rkisp_next_read(dev, reg, is_direct);

	rkisp_next_write(dev, reg, cur & ~mask, is_direct);
}
98
/*
 * rkisp_update_regs - flush dirty shadow registers in [start, end] to hw.
 *
 * Only registers still flagged SW_REG_CACHE (written but not yet synced)
 * are pushed; already-synced registers are skipped.
 */
void rkisp_update_regs(struct rkisp_device *dev, u32 start, u32 end)
{
	void __iomem *hw_base = dev->hw_dev->base_addr;
	u32 reg;

	if (end > RKISP_ISP_SW_REG_SIZE - 4) {
		dev_err(dev->dev, "%s out of range\n", __func__);
		return;
	}

	for (reg = start; reg <= end; reg += 4) {
		u32 *cache = dev->sw_base_addr + reg;
		u32 *state = dev->sw_base_addr + reg + RKISP_ISP_SW_REG_SIZE;

		if (*state == SW_REG_CACHE)
			writel(*cache, hw_base + reg);
	}
}
116
rkisp_alloc_buffer(struct rkisp_device * dev,struct rkisp_dummy_buffer * buf)117 int rkisp_alloc_buffer(struct rkisp_device *dev,
118 struct rkisp_dummy_buffer *buf)
119 {
120 unsigned long attrs = buf->is_need_vaddr ? 0 : DMA_ATTR_NO_KERNEL_MAPPING;
121 const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;
122 struct sg_table *sg_tbl;
123 void *mem_priv;
124 int ret = 0;
125
126 if (!buf->size) {
127 ret = -EINVAL;
128 goto err;
129 }
130
131 if (dev->hw_dev->is_dma_contig)
132 attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
133 buf->size = PAGE_ALIGN(buf->size);
134 mem_priv = g_ops->alloc(dev->hw_dev->dev, attrs, buf->size,
135 DMA_BIDIRECTIONAL, GFP_KERNEL | GFP_DMA32);
136 if (IS_ERR_OR_NULL(mem_priv)) {
137 ret = -ENOMEM;
138 goto err;
139 }
140
141 buf->mem_priv = mem_priv;
142 if (dev->hw_dev->is_dma_sg_ops) {
143 sg_tbl = (struct sg_table *)g_ops->cookie(mem_priv);
144 buf->dma_addr = sg_dma_address(sg_tbl->sgl);
145 g_ops->prepare(mem_priv);
146 } else {
147 buf->dma_addr = *((dma_addr_t *)g_ops->cookie(mem_priv));
148 }
149 if (buf->is_need_vaddr)
150 buf->vaddr = g_ops->vaddr(mem_priv);
151 if (buf->is_need_dbuf) {
152 buf->dbuf = g_ops->get_dmabuf(mem_priv, O_RDWR);
153 if (buf->is_need_dmafd) {
154 buf->dma_fd = dma_buf_fd(buf->dbuf, O_CLOEXEC);
155 if (buf->dma_fd < 0) {
156 dma_buf_put(buf->dbuf);
157 ret = buf->dma_fd;
158 goto err;
159 }
160 get_dma_buf(buf->dbuf);
161 }
162 }
163 v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev,
164 "%s buf:0x%x~0x%x size:%d\n", __func__,
165 (u32)buf->dma_addr, (u32)buf->dma_addr + buf->size, buf->size);
166 return ret;
167 err:
168 dev_err(dev->dev, "%s failed ret:%d\n", __func__, ret);
169 return ret;
170 }
171
rkisp_free_buffer(struct rkisp_device * dev,struct rkisp_dummy_buffer * buf)172 void rkisp_free_buffer(struct rkisp_device *dev,
173 struct rkisp_dummy_buffer *buf)
174 {
175 const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;
176
177 if (buf && buf->mem_priv) {
178 v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev,
179 "%s buf:0x%x~0x%x\n", __func__,
180 (u32)buf->dma_addr, (u32)buf->dma_addr + buf->size);
181 if (buf->dbuf)
182 dma_buf_put(buf->dbuf);
183 g_ops->put(buf->mem_priv);
184 buf->size = 0;
185 buf->dbuf = NULL;
186 buf->vaddr = NULL;
187 buf->mem_priv = NULL;
188 buf->is_need_dbuf = false;
189 buf->is_need_vaddr = false;
190 buf->is_need_dmafd = false;
191 }
192 }
193
rkisp_prepare_buffer(struct rkisp_device * dev,struct rkisp_dummy_buffer * buf)194 void rkisp_prepare_buffer(struct rkisp_device *dev,
195 struct rkisp_dummy_buffer *buf)
196 {
197 const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;
198
199 if (buf && buf->mem_priv)
200 g_ops->prepare(buf->mem_priv);
201 }
202
rkisp_finish_buffer(struct rkisp_device * dev,struct rkisp_dummy_buffer * buf)203 void rkisp_finish_buffer(struct rkisp_device *dev,
204 struct rkisp_dummy_buffer *buf)
205 {
206 const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;
207
208 if (buf && buf->mem_priv)
209 g_ops->finish(buf->mem_priv);
210 }
211
/*
 * rkisp_attach_hw - bind this ISP device to its shared hardware device.
 *
 * Looks up the "rockchip,hw" phandle in the device tree, resolves it to
 * the hw platform device's drvdata and registers @isp in the hw device's
 * isp[] table.  Copies isp_ver and base_addr from the hw device.
 *
 * Returns 0 on success, -ENODEV if the hw node/device is missing,
 * -EINVAL if the hw driver has not probed yet (no drvdata).
 *
 * NOTE(review): of_find_device_by_node() takes a reference on the
 * platform device that is never dropped with put_device() — looks like a
 * refcount leak unless the reference is intentionally held for the life
 * of the attachment; confirm against the hw driver's lifetime model.
 *
 * NOTE(review): hw->isp[hw->dev_num] is written without a bound check on
 * dev_num; presumably the DT limits the number of attached ISPs to the
 * array size — verify.
 */
int rkisp_attach_hw(struct rkisp_device *isp)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct rkisp_hw_dev *hw;

	np = of_parse_phandle(isp->dev->of_node, "rockchip,hw", 0);
	if (!np || !of_device_is_available(np)) {
		dev_err(isp->dev, "failed to get isp hw node\n");
		return -ENODEV;
	}

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev) {
		dev_err(isp->dev, "failed to get isp hw from node\n");
		return -ENODEV;
	}

	hw = platform_get_drvdata(pdev);
	if (!hw) {
		dev_err(isp->dev, "failed attach isp hw\n");
		return -EINVAL;
	}

	/* A second attached device means the hardware is shared. */
	if (hw->dev_num)
		hw->is_single = false;
	isp->dev_id = hw->dev_num;
	hw->isp[hw->dev_num] = isp;
	hw->dev_num++;
	isp->hw_dev = hw;
	isp->isp_ver = hw->isp_ver;
	isp->base_addr = hw->base_addr;

	return 0;
}
248
/*
 * rkisp_alloc_page_dummy_buf - build a dummy DMA target behind the IOMMU.
 *
 * Allocates a single physical page and maps it @n_pages times through an
 * sg table, so the device sees a @size-byte IOVA range while only one
 * page of real memory is consumed (every pages[i] aliases the same page).
 *
 * Returns 0 on success or a negative errno; all intermediate allocations
 * are unwound on failure.
 */
static int rkisp_alloc_page_dummy_buf(struct rkisp_device *dev, u32 size)
{
	struct rkisp_hw_dev *hw = dev->hw_dev;
	struct rkisp_dummy_buffer *dummy_buf = &hw->dummy_buf;
	u32 i, n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;
	struct sg_table *sg = NULL;
	int ret = -ENOMEM;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 0);
	if (!page)
		goto err;

	pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto free_page;
	/* every slot points at the one backing page */
	for (i = 0; i < n_pages; i++)
		pages[i] = page;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg)
		goto free_pages;
	ret = sg_alloc_table_from_pages(sg, pages, n_pages, 0,
					n_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto free_sg;

	/* dma_map_sg() returns the number of mapped entries, 0 on failure */
	ret = dma_map_sg(hw->dev, sg->sgl, sg->nents, DMA_BIDIRECTIONAL);
	if (ret <= 0) {
		ret = -ENOMEM;
		goto free_table;
	}
	dummy_buf->dma_addr = sg_dma_address(sg->sgl);
	dummy_buf->mem_priv = sg;
	dummy_buf->pages = pages;
	v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev,
		 "%s buf:0x%x map cnt:%d size:%d\n", __func__,
		 (u32)dummy_buf->dma_addr, ret, size);
	return 0;
free_table:
	sg_free_table(sg);
free_sg:
	kfree(sg);
free_pages:
	kvfree(pages);
free_page:
	__free_pages(page, 0);
err:
	return ret;
}
293
rkisp_free_page_dummy_buf(struct rkisp_device * dev)294 static void rkisp_free_page_dummy_buf(struct rkisp_device *dev)
295 {
296 struct rkisp_dummy_buffer *dummy_buf = &dev->hw_dev->dummy_buf;
297 struct sg_table *sg = dummy_buf->mem_priv;
298
299 if (!sg)
300 return;
301 dma_unmap_sg(dev->hw_dev->dev, sg->sgl, sg->nents, DMA_BIDIRECTIONAL);
302 sg_free_table(sg);
303 kfree(sg);
304 __free_pages(dummy_buf->pages[0], 0);
305 kvfree(dummy_buf->pages);
306 dummy_buf->mem_priv = NULL;
307 dummy_buf->pages = NULL;
308 }
309
rkisp_alloc_common_dummy_buf(struct rkisp_device * dev)310 int rkisp_alloc_common_dummy_buf(struct rkisp_device *dev)
311 {
312 struct rkisp_hw_dev *hw = dev->hw_dev;
313 struct rkisp_dummy_buffer *dummy_buf = &hw->dummy_buf;
314 struct rkisp_stream *stream;
315 struct rkisp_device *isp;
316 u32 i, j, val, size = 0;
317 int ret = 0;
318
319 if (dummy_buf->mem_priv)
320 goto end;
321
322 if (hw->max_in.w && hw->max_in.h)
323 size = hw->max_in.w * hw->max_in.h * 2;
324 for (i = 0; i < hw->dev_num; i++) {
325 isp = hw->isp[i];
326 for (j = 0; j < RKISP_MAX_STREAM; j++) {
327 stream = &isp->cap_dev.stream[j];
328 if (!stream->linked)
329 continue;
330 val = stream->out_isp_fmt.fmt_type == FMT_FBC ?
331 stream->out_fmt.plane_fmt[1].sizeimage :
332 stream->out_fmt.plane_fmt[0].bytesperline *
333 stream->out_fmt.height;
334 size = max(size, val);
335 }
336 }
337 if (size == 0)
338 goto end;
339
340 if (hw->is_mmu) {
341 ret = rkisp_alloc_page_dummy_buf(dev, size);
342 goto end;
343 }
344
345 dummy_buf->size = size;
346 ret = rkisp_alloc_buffer(dev, dummy_buf);
347 if (!ret)
348 v4l2_dbg(1, rkisp_debug, &dev->v4l2_dev,
349 "%s buf:0x%x size:%d\n", __func__,
350 (u32)dummy_buf->dma_addr, dummy_buf->size);
351 end:
352 if (ret < 0)
353 v4l2_err(&dev->v4l2_dev, "%s failed:%d\n", __func__, ret);
354 return ret;
355 }
356
rkisp_free_common_dummy_buf(struct rkisp_device * dev)357 void rkisp_free_common_dummy_buf(struct rkisp_device *dev)
358 {
359 struct rkisp_hw_dev *hw = dev->hw_dev;
360
361 if (atomic_read(&hw->refcnt) ||
362 atomic_read(&dev->cap_dev.refcnt) > 1)
363 return;
364
365 if (hw->is_mmu)
366 rkisp_free_page_dummy_buf(dev);
367 else
368 rkisp_free_buffer(dev, &hw->dummy_buf);
369 }
370