// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2020 Rockchip Electronics Co., Ltd */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-dma-sg.h>
#include <soc/rockchip/rockchip_iommu.h>

#include "common.h"
#include "dev.h"
#include "fec.h"
#include "hw.h"
#include "regs.h"

/*
 * rkispp_hw shares the hardware among the rkispp virtual devices:
 * rkispp_device rkispp_device rkispp_device rkispp_device
 *       |             |             |             |
 *       \             |             |             /
 *        -----------------------------------------
 *                           |
 *                       rkispp_hw
 */

struct irqs_data {
	const char *name;
	irqreturn_t (*irq_hdl)(int irq, void *ctx);
};

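/*
 * Fully reset the ISPP block: toggle the global soft reset (and the CRU
 * reset if present), refresh the IOMMU mapping, then reapply the default
 * scaler bypass, clock gate and interrupt mask settings for the ISPP version.
 */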
void rkispp_soft_reset(struct rkispp_hw_dev *hw)
{
	writel(GLB_SOFT_RST_ALL, hw->base_addr + RKISPP_CTRL_RESET);
	udelay(10);
	writel(~GLB_SOFT_RST_ALL, hw->base_addr + RKISPP_CTRL_RESET);
	if (hw->reset) {
		reset_control_assert(hw->reset);
		udelay(20);
		reset_control_deassert(hw->reset);
		udelay(20);
	}

	/* refresh iommu after reset */
	if (hw->is_mmu) {
		rockchip_iommu_disable(hw->dev);
		rockchip_iommu_enable(hw->dev);
	}
	if (hw->ispp_ver == ISPP_V10) {
		writel(SW_SCL_BYPASS, hw->base_addr + RKISPP_SCL0_CTRL);
		writel(SW_SCL_BYPASS, hw->base_addr + RKISPP_SCL1_CTRL);
		writel(SW_SCL_BYPASS, hw->base_addr + RKISPP_SCL2_CTRL);
		writel(OTHER_FORCE_UPD, hw->base_addr + RKISPP_CTRL_UPDATE);
		writel(GATE_DIS_ALL, hw->base_addr + RKISPP_CTRL_CLKGATE);
		writel(SW_FEC2DDR_DIS, hw->base_addr + RKISPP_FEC_CORE_CTRL);
		writel(NR_LOST_ERR | TNR_LOST_ERR | FBCH_EMPTY_NR |
		       FBCH_EMPTY_TNR | FBCD_DEC_ERR_NR | FBCD_DEC_ERR_TNR |
		       BUS_ERR_NR | BUS_ERR_TNR | SCL2_INT | SCL1_INT |
		       SCL0_INT | FEC_INT | ORB_INT | SHP_INT | NR_INT | TNR_INT,
		       hw->base_addr + RKISPP_CTRL_INT_MSK);
		writel(GATE_DIS_NR, hw->base_addr + RKISPP_CTRL_CLKGATE);
	} else if (hw->ispp_ver == ISPP_V20) {
		writel(GATE_DIS_ALL, hw->base_addr + RKISPP_CTRL_CLKGATE);
		writel(SW_FEC2DDR_DIS, hw->base_addr + RKISPP_FEC_CORE_CTRL);
		writel(FEC_INT, hw->base_addr + RKISPP_CTRL_INT_MSK);
		writel(GATE_DIS_FEC, hw->base_addr + RKISPP_CTRL_CLKGATE);
	}
}

/* use the register's default value if it has not been written when multiple devices share the hardware */
static void default_sw_reg_flag(struct rkispp_device *dev)
{
	if (dev->hw_dev->ispp_ver == ISPP_V10) {
		u32 reg[] = {
			RKISPP_TNR_CTRL,
			RKISPP_TNR_CORE_CTRL,
			RKISPP_NR_CTRL,
			RKISPP_NR_UVNR_CTRL_PARA,
			RKISPP_SHARP_CTRL,
			RKISPP_SHARP_CORE_CTRL,
			RKISPP_SCL0_CTRL,
			RKISPP_SCL1_CTRL,
			RKISPP_SCL2_CTRL,
			RKISPP_ORB_CORE_CTRL,
			RKISPP_FEC_CTRL,
			RKISPP_FEC_CORE_CTRL
		};
		u32 i, *flag;

		for (i = 0; i < ARRAY_SIZE(reg); i++) {
			flag = dev->sw_base_addr + reg[i] + RKISP_ISPP_SW_REG_SIZE;
			*flag = 0xffffffff;
		}
	} else if (dev->hw_dev->ispp_ver == ISPP_V20) {
		u32 reg[] = {
			RKISPP_FEC_CTRL,
			RKISPP_FEC_CORE_CTRL
		};
		u32 i, *flag;

		for (i = 0; i < ARRAY_SIZE(reg); i++) {
			flag = dev->sw_base_addr + reg[i] + RKISP_ISPP_SW_REG_SIZE;
			*flag = 0xffffffff;
		}
	}
}

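/*
 * Check the "iommus" DT property to decide whether buffers are mapped
 * through the IOMMU or allocated from physically contiguous memory.
 */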
static inline bool is_iommu_enable(struct device *dev)
{
	struct device_node *iommu;

	iommu = of_parse_phandle(dev->of_node, "iommus", 0);
	if (!iommu) {
		dev_info(dev, "no iommu attached, using non-iommu buffers\n");
		return false;
	} else if (!of_device_is_available(iommu)) {
		dev_info(dev, "iommu is disabled, using non-iommu buffers\n");
		of_node_put(iommu);
		return false;
	}
	of_node_put(iommu);

	return true;
}

static void disable_sys_clk(struct rkispp_hw_dev *dev)
{
	int i;

	for (i = 0; i < dev->clks_num; i++)
		clk_disable_unprepare(dev->clks[i]);
}

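/*
 * Enable all ISPP clocks and pick the core clock rate from the rate table
 * based on the widest input to process; when several virtual devices share
 * the hardware, step up to the next table entry.
 */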
static int enable_sys_clk(struct rkispp_hw_dev *dev)
{
	struct rkispp_device *ispp = dev->ispp[dev->cur_dev_id];
	u32 w = dev->max_in.w ? dev->max_in.w : ispp->ispp_sdev.in_fmt.width;
	int i, ret = -EINVAL;

	for (i = 0; i < dev->clks_num; i++) {
		ret = clk_prepare_enable(dev->clks[i]);
		if (ret < 0)
			goto err;
	}

	for (i = 0; i < dev->clk_rate_tbl_num; i++)
		if (w <= dev->clk_rate_tbl[i].refer_data)
			break;
	if (!dev->is_single)
		i++;
	if (i > dev->clk_rate_tbl_num - 1)
		i = dev->clk_rate_tbl_num - 1;
	dev->core_clk_max = dev->clk_rate_tbl[i].clk_rate * 1000000;
	dev->core_clk_min = dev->clk_rate_tbl[0].clk_rate * 1000000;
	rkispp_set_clk_rate(dev->clks[0], dev->core_clk_min);
	dev_dbg(dev->dev, "set ispp clk:%luHz\n", clk_get_rate(dev->clks[0]));
	return 0;
err:
	for (--i; i >= 0; --i)
		clk_disable_unprepare(dev->clks[i]);
	return ret;
}

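/*
 * Shared handler for all ISPP interrupt lines: latch and clear the status
 * bits, hand FEC interrupts to the FEC core, and forward the remaining bits
 * to the currently active virtual device.
 */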
static irqreturn_t irq_hdl(int irq, void *ctx)
{
	struct device *dev = ctx;
	struct rkispp_hw_dev *hw_dev = dev_get_drvdata(dev);
	struct rkispp_device *ispp = hw_dev->ispp[hw_dev->cur_dev_id];
	void __iomem *base = hw_dev->base_addr;
	unsigned int mis_val;

	spin_lock(&hw_dev->irq_lock);
	mis_val = readl(base + RKISPP_CTRL_INT_STA);
	writel(mis_val, base + RKISPP_CTRL_INT_CLR);
	spin_unlock(&hw_dev->irq_lock);

	if (IS_ENABLED(CONFIG_VIDEO_ROCKCHIP_ISPP_FEC) && mis_val & FEC_INT) {
		mis_val &= ~FEC_INT;
		rkispp_fec_irq(hw_dev);
	}

	if (mis_val)
		ispp->irq_hdl(mis_val, ispp);

	return IRQ_HANDLED;
}

static const char * const rv1126_ispp_clks[] = {
	"clk_ispp",
	"aclk_ispp",
	"hclk_ispp",
};

static const char * const rk3588_ispp_clks[] = {
	"clk_ispp",
	"aclk_ispp",
	"hclk_ispp",
};

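/* core clock rate in MHz, selected by the maximum input width in pixels */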
static const struct ispp_clk_info rv1126_ispp_clk_rate[] = {
	{
		.clk_rate = 150,
		.refer_data = 0,
	}, {
		.clk_rate = 250,
		.refer_data = 1920, //width
	}, {
		.clk_rate = 350,
		.refer_data = 2688,
	}, {
		.clk_rate = 400,
		.refer_data = 3072,
	}, {
		.clk_rate = 500,
		.refer_data = 3840,
	}
};

static const struct ispp_clk_info rk3588_ispp_clk_rate[] = {
	{
		.clk_rate = 300,
		.refer_data = 1920, //width
	}, {
		.clk_rate = 400,
		.refer_data = 2688,
	}, {
		.clk_rate = 500,
		.refer_data = 3072,
	}, {
		.clk_rate = 600,
		.refer_data = 3840,
	}, {
		.clk_rate = 702,
		.refer_data = 4672,
	}
};

static struct irqs_data rv1126_ispp_irqs[] = {
	{"ispp_irq", irq_hdl},
	{"fec_irq", irq_hdl},
};

static struct irqs_data rk3588_ispp_irqs[] = {
	{"fec_irq", irq_hdl},
};

static const struct ispp_match_data rv1126_ispp_match_data = {
	.clks = rv1126_ispp_clks,
	.clks_num = ARRAY_SIZE(rv1126_ispp_clks),
	.clk_rate_tbl = rv1126_ispp_clk_rate,
	.clk_rate_tbl_num = ARRAY_SIZE(rv1126_ispp_clk_rate),
	.irqs = rv1126_ispp_irqs,
	.num_irqs = ARRAY_SIZE(rv1126_ispp_irqs),
	.ispp_ver = ISPP_V10,
};

static const struct ispp_match_data rk3588_ispp_match_data = {
	.clks = rk3588_ispp_clks,
	.clks_num = ARRAY_SIZE(rk3588_ispp_clks),
	.clk_rate_tbl = rk3588_ispp_clk_rate,
	.clk_rate_tbl_num = ARRAY_SIZE(rk3588_ispp_clk_rate),
	.irqs = rk3588_ispp_irqs,
	.num_irqs = ARRAY_SIZE(rk3588_ispp_irqs),
	.ispp_ver = ISPP_V20,
};

static const struct of_device_id rkispp_hw_of_match[] = {
	{
		.compatible = "rockchip,rv1126-rkispp",
		.data = &rv1126_ispp_match_data,
	}, {
		.compatible = "rockchip,rk3588-rkispp",
		.data = &rk3588_ispp_match_data,
	},
	{},
};

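/*
 * Map the register space, request the per-SoC interrupt lines, look up the
 * clocks and optional reset control, then choose the videobuf2 memory ops
 * based on the reserved-memory / IOMMU configuration before registering the
 * FEC core.
 */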
static int rkispp_hw_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct ispp_match_data *match_data;
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct rkispp_hw_dev *hw_dev;
	struct resource *res;
	int i, ret, irq;
	bool is_mem_reserved = true;

	match = of_match_node(rkispp_hw_of_match, node);
	if (!match)
		return -ENODEV;

	hw_dev = devm_kzalloc(dev, sizeof(*hw_dev), GFP_KERNEL);
	if (!hw_dev)
		return -ENOMEM;

	dev_set_drvdata(dev, hw_dev);
	hw_dev->dev = dev;
	match_data = match->data;
	hw_dev->match_data = match->data;
	hw_dev->max_in.w = 0;
	hw_dev->max_in.h = 0;
	hw_dev->max_in.fps = 0;
	of_property_read_u32_array(node, "max-input", &hw_dev->max_in.w, 3);
	dev_info(dev, "max input:%dx%d@%dfps\n",
		 hw_dev->max_in.w, hw_dev->max_in.h, hw_dev->max_in.fps);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "get resource failed\n");
		ret = -EINVAL;
		goto err;
	}
	hw_dev->base_addr = devm_ioremap_resource(dev, res);
	if (PTR_ERR(hw_dev->base_addr) == -EBUSY) {
		resource_size_t offset = res->start;
		resource_size_t size = resource_size(res);

		hw_dev->base_addr = devm_ioremap(dev, offset, size);
	}
	if (IS_ERR(hw_dev->base_addr)) {
		dev_err(dev, "ioremap failed\n");
		ret = PTR_ERR(hw_dev->base_addr);
		goto err;
	}

	rkispp_monitor = device_property_read_bool(dev, "rockchip,restart-monitor-en");
	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
					   match_data->irqs[0].name);
	if (res) {
		/* there are irq names in dts */
		for (i = 0; i < match_data->num_irqs; i++) {
			irq = platform_get_irq_byname(pdev,
						      match_data->irqs[i].name);
			if (irq < 0) {
				dev_err(dev, "no irq %s in dts\n",
					match_data->irqs[i].name);
				ret = irq;
				goto err;
			}
			ret = devm_request_irq(dev, irq,
					       match_data->irqs[i].irq_hdl,
					       IRQF_SHARED,
					       dev_driver_string(dev),
					       dev);
			if (ret < 0) {
				dev_err(dev, "request %s failed: %d\n",
					match_data->irqs[i].name, ret);
				goto err;
			}
		}
	}

	for (i = 0; i < match_data->clks_num; i++) {
		struct clk *clk = devm_clk_get(dev, match_data->clks[i]);

		if (IS_ERR(clk)) {
			dev_err(dev, "failed to get %s\n",
				match_data->clks[i]);
			ret = PTR_ERR(clk);
			goto err;
		}
		hw_dev->clks[i] = clk;
	}
	hw_dev->clks_num = match_data->clks_num;
	hw_dev->clk_rate_tbl = match_data->clk_rate_tbl;
	hw_dev->clk_rate_tbl_num = match_data->clk_rate_tbl_num;

	hw_dev->reset = devm_reset_control_array_get(dev, false, false);
	if (IS_ERR(hw_dev->reset)) {
		dev_info(dev, "failed to get cru reset\n");
		hw_dev->reset = NULL;
	}

	hw_dev->dev_num = 0;
	hw_dev->cur_dev_id = 0;
	hw_dev->ispp_ver = match_data->ispp_ver;
	mutex_init(&hw_dev->dev_lock);
	spin_lock_init(&hw_dev->irq_lock);
	spin_lock_init(&hw_dev->buf_lock);
	atomic_set(&hw_dev->refcnt, 0);
	INIT_LIST_HEAD(&hw_dev->list);
	hw_dev->is_idle = true;
	hw_dev->is_single = true;
	hw_dev->is_fec_ext = false;
	hw_dev->is_dma_contig = true;
	hw_dev->is_dma_sg_ops = false;
	hw_dev->is_shutdown = false;
	hw_dev->is_first = true;
	hw_dev->is_mmu = is_iommu_enable(dev);
	ret = of_reserved_mem_device_init(dev);
	if (ret) {
		is_mem_reserved = false;
		if (!hw_dev->is_mmu)
			dev_info(dev, "no reserved memory region, using default CMA area\n");
		else
			hw_dev->is_dma_contig = false;
	}
	if (is_mem_reserved) {
		/* reserved memory uses rdma_sg */
		hw_dev->mem_ops = &vb2_rdma_sg_memops;
		hw_dev->is_dma_sg_ops = true;
	} else if (hw_dev->is_mmu) {
		hw_dev->mem_ops = &vb2_dma_sg_memops;
		hw_dev->is_dma_sg_ops = true;
	} else {
		hw_dev->mem_ops = &vb2_dma_contig_memops;
	}

	rkispp_register_fec(hw_dev);
	pm_runtime_enable(&pdev->dev);

	return 0;
err:
	return ret;
}

static int rkispp_hw_remove(struct platform_device *pdev)
{
	struct rkispp_hw_dev *hw_dev = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	mutex_destroy(&hw_dev->dev_lock);
	rkispp_unregister_fec(hw_dev);
	return 0;
}

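/* on shutdown, mask all interrupts and soft reset the block if still powered */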
static void rkispp_hw_shutdown(struct platform_device *pdev)
{
	struct rkispp_hw_dev *hw_dev = platform_get_drvdata(pdev);

	hw_dev->is_shutdown = true;
	if (pm_runtime_active(&pdev->dev)) {
		writel(0, hw_dev->base_addr + RKISPP_CTRL_INT_MSK);
		writel(GLB_SOFT_RST_ALL, hw_dev->base_addr + RKISPP_CTRL_RESET);
		writel(~GLB_SOFT_RST_ALL, hw_dev->base_addr + RKISPP_CTRL_RESET);
	}
	dev_info(&pdev->dev, "%s\n", __func__);
}

static int __maybe_unused rkispp_runtime_suspend(struct device *dev)
{
	struct rkispp_hw_dev *hw_dev = dev_get_drvdata(dev);

	writel(0, hw_dev->base_addr + RKISPP_CTRL_INT_MSK);
	disable_sys_clk(hw_dev);
	return 0;
}

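/*
 * On resume, re-enable the clocks, reset the block and reload each virtual
 * device's software register shadow from the hardware so that unwritten
 * registers fall back to their default values.
 */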
static int __maybe_unused rkispp_runtime_resume(struct device *dev)
{
	struct rkispp_hw_dev *hw_dev = dev_get_drvdata(dev);
	void __iomem *base = hw_dev->base_addr;
	int i;

	enable_sys_clk(hw_dev);
	rkispp_soft_reset(hw_dev);

	for (i = 0; i < hw_dev->dev_num; i++) {
		void *buf = hw_dev->ispp[i]->sw_base_addr;

		memset(buf, 0, RKISP_ISPP_SW_MAX_SIZE);
		memcpy_fromio(buf, base, RKISP_ISPP_SW_REG_SIZE);
		default_sw_reg_flag(hw_dev->ispp[i]);
	}
	hw_dev->is_idle = true;
	return 0;
}

static const struct dev_pm_ops rkispp_hw_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(rkispp_runtime_suspend,
			   rkispp_runtime_resume, NULL)
};

static struct platform_driver rkispp_hw_drv = {
	.driver = {
		.name = "rkispp_hw",
		.of_match_table = of_match_ptr(rkispp_hw_of_match),
		.pm = &rkispp_hw_pm_ops,
	},
	.probe = rkispp_hw_probe,
	.remove = rkispp_hw_remove,
	.shutdown = rkispp_hw_shutdown,
};

int __init rkispp_hw_drv_init(void)
{
	int ret;

	ret = platform_driver_register(&rkispp_hw_drv);
	if (!ret)
		ret = platform_driver_register(&rkispp_plat_drv);
	return ret;
}

#if !(IS_BUILTIN(CONFIG_VIDEO_ROCKCHIP_ISP) && IS_BUILTIN(CONFIG_VIDEO_ROCKCHIP_ISPP))
module_init(rkispp_hw_drv_init);
#endif
511