1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Cedrus VPU driver
4 *
5 * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
6 * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
7 * Copyright (C) 2018 Bootlin
8 *
9 * Based on the vim2m driver, that is:
10 *
11 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
12 * Pawel Osciak, <pawel@osciak.com>
13 * Marek Szyprowski, <m.szyprowski@samsung.com>
14 */
15
16 #include <linux/platform_device.h>
17 #include <linux/of_reserved_mem.h>
18 #include <linux/of_device.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/interrupt.h>
21 #include <linux/clk.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/regmap.h>
24 #include <linux/reset.h>
25 #include <linux/soc/sunxi/sunxi_sram.h>
26
27 #include <media/videobuf2-core.h>
28 #include <media/v4l2-mem2mem.h>
29
30 #include "cedrus.h"
31 #include "cedrus_hw.h"
32 #include "cedrus_regs.h"
33
cedrus_engine_enable(struct cedrus_ctx * ctx,enum cedrus_codec codec)34 int cedrus_engine_enable(struct cedrus_ctx *ctx, enum cedrus_codec codec)
35 {
36 u32 reg = 0;
37
38 /*
39 * FIXME: This is only valid on 32-bits DDR's, we should test
40 * it on the A13/A33.
41 */
42 reg |= VE_MODE_REC_WR_MODE_2MB;
43 reg |= VE_MODE_DDR_MODE_BW_128;
44
45 switch (codec) {
46 case CEDRUS_CODEC_MPEG2:
47 reg |= VE_MODE_DEC_MPEG;
48 break;
49
50 case CEDRUS_CODEC_H264:
51 reg |= VE_MODE_DEC_H264;
52 break;
53
54 case CEDRUS_CODEC_H265:
55 reg |= VE_MODE_DEC_H265;
56 break;
57
58 default:
59 return -EINVAL;
60 }
61
62 if (ctx->src_fmt.width == 4096)
63 reg |= VE_MODE_PIC_WIDTH_IS_4096;
64 if (ctx->src_fmt.width > 2048)
65 reg |= VE_MODE_PIC_WIDTH_MORE_2048;
66
67 cedrus_write(ctx->dev, VE_MODE, reg);
68
69 return 0;
70 }
71
/* Power down the decoding engine by writing the disabled mode value. */
void cedrus_engine_disable(struct cedrus_dev *dev)
{
	cedrus_write(dev, VE_MODE, VE_MODE_DISABLED);
}
76
cedrus_dst_format_set(struct cedrus_dev * dev,struct v4l2_pix_format * fmt)77 void cedrus_dst_format_set(struct cedrus_dev *dev,
78 struct v4l2_pix_format *fmt)
79 {
80 unsigned int width = fmt->width;
81 unsigned int height = fmt->height;
82 u32 chroma_size;
83 u32 reg;
84
85 switch (fmt->pixelformat) {
86 case V4L2_PIX_FMT_NV12:
87 chroma_size = ALIGN(width, 16) * ALIGN(height, 16) / 2;
88
89 reg = VE_PRIMARY_OUT_FMT_NV12;
90 cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);
91
92 reg = chroma_size / 2;
93 cedrus_write(dev, VE_PRIMARY_CHROMA_BUF_LEN, reg);
94
95 reg = VE_PRIMARY_FB_LINE_STRIDE_LUMA(ALIGN(width, 16)) |
96 VE_PRIMARY_FB_LINE_STRIDE_CHROMA(ALIGN(width, 16) / 2);
97 cedrus_write(dev, VE_PRIMARY_FB_LINE_STRIDE, reg);
98
99 break;
100 case V4L2_PIX_FMT_SUNXI_TILED_NV12:
101 default:
102 reg = VE_PRIMARY_OUT_FMT_TILED_32_NV12;
103 cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);
104
105 reg = VE_SECONDARY_OUT_FMT_TILED_32_NV12;
106 cedrus_write(dev, VE_CHROMA_BUF_LEN, reg);
107
108 break;
109 }
110 }
111
/*
 * Decoder interrupt handler: checks the per-codec IRQ status, acknowledges
 * the interrupt and completes the current mem2mem job with the appropriate
 * buffer state.
 */
static irqreturn_t cedrus_irq(int irq, void *data)
{
	struct cedrus_dev *dev = data;
	struct cedrus_ctx *ctx;
	enum vb2_buffer_state state;
	enum cedrus_irq_status status;

	/* An IRQ without a running m2m context means the instance went away. */
	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	if (!ctx) {
		v4l2_err(&dev->v4l2_dev,
			 "Instance released before the end of transaction\n");
		return IRQ_NONE;
	}

	/* Ask the active codec's ops whether this IRQ belongs to us. */
	status = dev->dec_ops[ctx->current_codec]->irq_status(ctx);
	if (status == CEDRUS_IRQ_NONE)
		return IRQ_NONE;

	/* Mask further interrupts before acknowledging the pending one. */
	dev->dec_ops[ctx->current_codec]->irq_disable(ctx);
	dev->dec_ops[ctx->current_codec]->irq_clear(ctx);

	if (status == CEDRUS_IRQ_ERROR)
		state = VB2_BUF_STATE_ERROR;
	else
		state = VB2_BUF_STATE_DONE;

	/* Mark src/dst buffers done and let the m2m core schedule the next job. */
	v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
					 state);

	return IRQ_HANDLED;
}
143
/*
 * Runtime/system suspend: gate all VPU clocks, then hold the block in reset.
 * Clocks are released in the reverse order of cedrus_hw_resume().
 */
int cedrus_hw_suspend(struct device *device)
{
	struct cedrus_dev *dev = dev_get_drvdata(device);

	clk_disable_unprepare(dev->ram_clk);
	clk_disable_unprepare(dev->mod_clk);
	clk_disable_unprepare(dev->ahb_clk);

	reset_control_assert(dev->rstc);

	return 0;
}
156
/*
 * Runtime/system resume: release the reset line and enable the AHB, module
 * and RAM clocks in order. On failure, unwind whatever was enabled via the
 * goto ladder so the hardware is left fully quiesced.
 */
int cedrus_hw_resume(struct device *device)
{
	struct cedrus_dev *dev = dev_get_drvdata(device);
	int ret;

	ret = reset_control_reset(dev->rstc);
	if (ret) {
		dev_err(dev->dev, "Failed to apply reset\n");

		return ret;
	}

	ret = clk_prepare_enable(dev->ahb_clk);
	if (ret) {
		dev_err(dev->dev, "Failed to enable AHB clock\n");

		goto err_rst;
	}

	ret = clk_prepare_enable(dev->mod_clk);
	if (ret) {
		dev_err(dev->dev, "Failed to enable MOD clock\n");

		goto err_ahb_clk;
	}

	ret = clk_prepare_enable(dev->ram_clk);
	if (ret) {
		dev_err(dev->dev, "Failed to enable RAM clock\n");

		goto err_mod_clk;
	}

	return 0;

	/* Error unwind: disable in reverse order of enabling. */
err_mod_clk:
	clk_disable_unprepare(dev->mod_clk);
err_ahb_clk:
	clk_disable_unprepare(dev->ahb_clk);
err_rst:
	reset_control_assert(dev->rstc);

	return ret;
}
201
/*
 * One-time hardware setup at driver probe: match the SoC variant, request
 * the decoder IRQ, set up DMA/reserved memory, claim SRAM, acquire clocks,
 * reset line and MMIO registers, then enable runtime PM (resuming manually
 * if runtime PM is compiled out).
 *
 * Returns 0 on success or a negative error code; on failure all resources
 * acquired so far are released via the goto ladder (devm-managed resources
 * are released automatically).
 */
int cedrus_hw_probe(struct cedrus_dev *dev)
{
	const struct cedrus_variant *variant;
	int irq_dec;
	int ret;

	variant = of_device_get_match_data(dev->dev);
	if (!variant)
		return -EINVAL;

	dev->capabilities = variant->capabilities;

	irq_dec = platform_get_irq(dev->pdev, 0);
	/*
	 * NOTE(review): if platform_get_irq() ever returned 0, this would
	 * return 0 (success) with no IRQ requested; current kernels document
	 * it as returning a non-zero IRQ or a negative errno — confirm.
	 */
	if (irq_dec <= 0)
		return irq_dec;
	ret = devm_request_irq(dev->dev, irq_dec, cedrus_irq,
			       0, dev_name(dev->dev), dev);
	if (ret) {
		dev_err(dev->dev, "Failed to request IRQ\n");

		return ret;
	}

	/*
	 * The VPU is only able to handle bus addresses so we have to subtract
	 * the RAM offset to the physical addresses.
	 *
	 * This information will eventually be obtained from device-tree.
	 *
	 * XXX(hch): this has no business in a driver and needs to move
	 * to the device tree.
	 */

#ifdef PHYS_PFN_OFFSET
	if (!(variant->quirks & CEDRUS_QUIRK_NO_DMA_OFFSET)) {
		ret = dma_direct_set_offset(dev->dev, PHYS_OFFSET, 0, SZ_4G);
		if (ret)
			return ret;
	}
#endif

	/* -ENODEV just means no reserved-memory region — not an error. */
	ret = of_reserved_mem_device_init(dev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(dev->dev, "Failed to reserve memory\n");

		return ret;
	}

	ret = sunxi_sram_claim(dev->dev);
	if (ret) {
		dev_err(dev->dev, "Failed to claim SRAM\n");

		goto err_mem;
	}

	dev->ahb_clk = devm_clk_get(dev->dev, "ahb");
	if (IS_ERR(dev->ahb_clk)) {
		dev_err(dev->dev, "Failed to get AHB clock\n");

		ret = PTR_ERR(dev->ahb_clk);
		goto err_sram;
	}

	dev->mod_clk = devm_clk_get(dev->dev, "mod");
	if (IS_ERR(dev->mod_clk)) {
		dev_err(dev->dev, "Failed to get MOD clock\n");

		ret = PTR_ERR(dev->mod_clk);
		goto err_sram;
	}

	dev->ram_clk = devm_clk_get(dev->dev, "ram");
	if (IS_ERR(dev->ram_clk)) {
		dev_err(dev->dev, "Failed to get RAM clock\n");

		ret = PTR_ERR(dev->ram_clk);
		goto err_sram;
	}

	dev->rstc = devm_reset_control_get(dev->dev, NULL);
	if (IS_ERR(dev->rstc)) {
		dev_err(dev->dev, "Failed to get reset control\n");

		ret = PTR_ERR(dev->rstc);
		goto err_sram;
	}

	dev->base = devm_platform_ioremap_resource(dev->pdev, 0);
	if (IS_ERR(dev->base)) {
		dev_err(dev->dev, "Failed to map registers\n");

		ret = PTR_ERR(dev->base);
		goto err_sram;
	}

	/* Run the module clock at the variant-specific rate. */
	ret = clk_set_rate(dev->mod_clk, variant->mod_rate);
	if (ret) {
		dev_err(dev->dev, "Failed to set clock rate\n");

		goto err_sram;
	}

	pm_runtime_enable(dev->dev);
	/* Without runtime PM support, bring the hardware up right away. */
	if (!pm_runtime_enabled(dev->dev)) {
		ret = cedrus_hw_resume(dev->dev);
		if (ret)
			goto err_pm;
	}

	return 0;

err_pm:
	pm_runtime_disable(dev->dev);
err_sram:
	sunxi_sram_release(dev->dev);
err_mem:
	of_reserved_mem_device_release(dev->dev);

	return ret;
}
322
/*
 * Driver removal: disable runtime PM and, if the device is still powered,
 * suspend the hardware manually, then release SRAM and reserved memory.
 * Mirrors the setup order of cedrus_hw_probe().
 */
void cedrus_hw_remove(struct cedrus_dev *dev)
{
	pm_runtime_disable(dev->dev);
	if (!pm_runtime_status_suspended(dev->dev))
		cedrus_hw_suspend(dev->dev);

	sunxi_sram_release(dev->dev);

	of_reserved_mem_device_release(dev->dev);
}
333