// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2021 Rockchip Electronics Co., Ltd
 *
 * author:
 *	Ding Wei, leo.ding@rock-chips.com
 *
 */

#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/proc_fs.h>
#include <linux/pm_runtime.h>
#include <linux/nospec.h>
#include <linux/workqueue.h>
#include <linux/dma-iommu.h>
#include <soc/rockchip/pm_domains.h>
#include <soc/rockchip/rockchip_ipa.h>
#include <soc/rockchip/rockchip_opp_select.h>
#include <soc/rockchip/rockchip_system_monitor.h>

#include "mpp_debug.h"
#include "mpp_iommu.h"
#include "mpp_common.h"

#define RKVENC_DRIVER_NAME		"mpp_rkvenc2"

#define RKVENC_SESSION_MAX_BUFFERS	40
#define RKVENC_MAX_CORE_NUM		4

#define to_rkvenc_info(info)	\
		container_of(info, struct rkvenc_hw_info, hw)
#define to_rkvenc_task(ctx)	\
		container_of(ctx, struct rkvenc_task, mpp_task)
#define to_rkvenc_dev(dev)	\
		container_of(dev, struct rkvenc_dev, mpp)

enum RKVENC_FORMAT_TYPE {
	RKVENC_FMT_BASE		= 0x0000,
	RKVENC_FMT_H264E	= RKVENC_FMT_BASE + 0,
	RKVENC_FMT_H265E	= RKVENC_FMT_BASE + 1,

	RKVENC_FMT_OSD_BASE	= 0x1000,
	RKVENC_FMT_H264E_OSD	= RKVENC_FMT_OSD_BASE + 0,
	RKVENC_FMT_H265E_OSD	= RKVENC_FMT_OSD_BASE + 1,
	RKVENC_FMT_BUTT,
};

enum RKVENC_CLASS_TYPE {
	RKVENC_CLASS_BASE	= 0,	/* base */
	RKVENC_CLASS_PIC	= 1,	/* picture configure */
	RKVENC_CLASS_RC		= 2,	/* rate control */
	RKVENC_CLASS_PAR	= 3,	/* parameter */
	RKVENC_CLASS_SQI	= 4,	/* subjective adjust */
	RKVENC_CLASS_SCL	= 5,	/* scaling list */
	RKVENC_CLASS_OSD	= 6,	/* osd */
	RKVENC_CLASS_ST		= 7,	/* status */
	RKVENC_CLASS_DEBUG	= 8,	/* debug */
	RKVENC_CLASS_BUTT,
};

enum RKVENC_CLASS_FD_TYPE {
	RKVENC_CLASS_FD_BASE	= 0,	/* base */
	RKVENC_CLASS_FD_OSD	= 1,	/* osd */
	RKVENC_CLASS_FD_BUTT,
};

struct rkvenc_reg_msg {
	u32 base_s;
	u32 base_e;
};

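/*
 * The hardware exposes its registers in several disjoint address ranges
 * ("classes", see RKVENC_CLASS_TYPE above). reg_msg[] records the first
 * and last register offset of each class; fd_reg[] marks the classes
 * whose registers carry dma-buf fds that must be translated to iova
 * addresses; fmt_reg describes where the coding-format bitfield lives.
 * All *_base fields below are byte offsets from the register base.
 */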
struct rkvenc_hw_info {
	struct mpp_hw_info hw;
	/* for register range check */
	u32 reg_class;
	struct rkvenc_reg_msg reg_msg[RKVENC_CLASS_BUTT];
	/* for fd translate */
	u32 fd_class;
	struct {
		u32 class;
		u32 base_fmt;
	} fd_reg[RKVENC_CLASS_FD_BUTT];
	/* for get format */
	struct {
		u32 class;
		u32 base;
		u32 bitpos;
		u32 bitlen;
	} fmt_reg;
	/* register info */
	u32 enc_start_base;
	u32 enc_clr_base;
	u32 int_en_base;
	u32 int_mask_base;
	u32 int_clr_base;
	u32 int_sta_base;
	u32 enc_wdg_base;
	u32 err_mask;
};

struct rkvenc_task {
	struct mpp_task mpp_task;
	int fmt;
	struct rkvenc_hw_info *hw_info;

	/* class register */
	struct {
		u32 valid;
		u32 *data;
		u32 size;
	} reg[RKVENC_CLASS_BUTT];
	/* register offset info */
	struct reg_offset_info off_inf;

	enum MPP_CLOCK_MODE clk_mode;
	u32 irq_status;
	/* reqs for current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
	struct mpp_dma_buffer *table;
	u32 task_no;
};

#define RKVENC_MAX_RCB_NUM		(4)

struct rcb_info_elem {
	u32 index;
	u32 size;
};

struct rkvenc2_rcb_info {
	u32 cnt;
	struct rcb_info_elem elem[RKVENC_MAX_RCB_NUM];
};

struct rkvenc2_session_priv {
	struct rw_semaphore rw_sem;
	/* codec info from user */
	struct {
		/* show mode */
		u32 flag;
		/* item data */
		u64 val;
	} codec_info[ENC_INFO_BUTT];
	/* rcb_info for sram */
	struct rkvenc2_rcb_info rcb_inf;
};

struct rkvenc_dev {
	struct mpp_dev mpp;
	struct rkvenc_hw_info *hw_info;

	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
	struct mpp_clk_info core_clk_info;
	u32 default_max_load;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	struct reset_control *rst_a;
	struct reset_control *rst_h;
	struct reset_control *rst_core;
	/* for ccu */
	struct rkvenc_ccu *ccu;
	struct list_head core_link;
	u32 disable_work;

	/* internal rcb-memory */
	u32 sram_size;
	u32 sram_used;
	dma_addr_t sram_iova;
	u32 sram_enabled;
	struct page *rcb_page;
};

struct rkvenc_ccu {
	u32 core_num;
	/* lock for core attach */
	struct mutex lock;
	struct list_head core_list;
	struct mpp_dev *main_core;
};

static struct rkvenc_hw_info rkvenc_v2_hw_info = {
	.hw = {
		.reg_num = 254,
		.reg_id = 0,
		.reg_en = 4,
		.reg_start = 160,
		.reg_end = 253,
	},
	.reg_class = RKVENC_CLASS_BUTT,
	.reg_msg[RKVENC_CLASS_BASE] = {
		.base_s = 0x0000,
		.base_e = 0x0058,
	},
	.reg_msg[RKVENC_CLASS_PIC] = {
		.base_s = 0x0280,
		.base_e = 0x03f4,
	},
	.reg_msg[RKVENC_CLASS_RC] = {
		.base_s = 0x1000,
		.base_e = 0x10e0,
	},
	.reg_msg[RKVENC_CLASS_PAR] = {
		.base_s = 0x1700,
		.base_e = 0x1cd4,
	},
	.reg_msg[RKVENC_CLASS_SQI] = {
		.base_s = 0x2000,
		.base_e = 0x21e4,
	},
	.reg_msg[RKVENC_CLASS_SCL] = {
		.base_s = 0x2200,
		.base_e = 0x2c98,
	},
	.reg_msg[RKVENC_CLASS_OSD] = {
		.base_s = 0x3000,
		.base_e = 0x347c,
	},
	.reg_msg[RKVENC_CLASS_ST] = {
		.base_s = 0x4000,
		.base_e = 0x42cc,
	},
	.reg_msg[RKVENC_CLASS_DEBUG] = {
		.base_s = 0x5000,
		.base_e = 0x5354,
	},
	.fd_class = RKVENC_CLASS_FD_BUTT,
	.fd_reg[RKVENC_CLASS_FD_BASE] = {
		.class = RKVENC_CLASS_PIC,
		.base_fmt = RKVENC_FMT_BASE,
	},
	.fd_reg[RKVENC_CLASS_FD_OSD] = {
		.class = RKVENC_CLASS_OSD,
		.base_fmt = RKVENC_FMT_OSD_BASE,
	},
	.fmt_reg = {
		.class = RKVENC_CLASS_PIC,
		.base = 0x0300,
		.bitpos = 0,
		.bitlen = 1,
	},
	.enc_start_base = 0x0010,
	.enc_clr_base = 0x0014,
	.int_en_base = 0x0020,
	.int_mask_base = 0x0024,
	.int_clr_base = 0x0028,
	.int_sta_base = 0x002c,
	.enc_wdg_base = 0x0038,
	.err_mask = 0x03f0,
};

/*
 * file handle translate information for v2
 */
static const u16 trans_tbl_h264e_v2[] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
	10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
	20, 21, 22, 23,
};

static const u16 trans_tbl_h264e_v2_osd[] = {
	20, 21, 22, 23, 24, 25, 26, 27,
};

static const u16 trans_tbl_h265e_v2[] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
	10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
	20, 21, 22, 23,
};

static const u16 trans_tbl_h265e_v2_osd[] = {
	20, 21, 22, 23, 24, 25, 26, 27,
};

static struct mpp_trans_info trans_rkvenc_v2[] = {
	[RKVENC_FMT_H264E] = {
		.count = ARRAY_SIZE(trans_tbl_h264e_v2),
		.table = trans_tbl_h264e_v2,
	},
	[RKVENC_FMT_H264E_OSD] = {
		.count = ARRAY_SIZE(trans_tbl_h264e_v2_osd),
		.table = trans_tbl_h264e_v2_osd,
	},
	[RKVENC_FMT_H265E] = {
		.count = ARRAY_SIZE(trans_tbl_h265e_v2),
		.table = trans_tbl_h265e_v2,
	},
	[RKVENC_FMT_H265E_OSD] = {
		.count = ARRAY_SIZE(trans_tbl_h265e_v2_osd),
		.table = trans_tbl_h265e_v2_osd,
	},
};

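/*
 * Check whether a userspace request window overlaps a register class.
 * Both ends are inclusive: a request at offset 0x0280 with size 8
 * covers registers 0x0280 and 0x0284 (req_e = 0x0284), so it overlaps
 * RKVENC_CLASS_PIC (0x0280..0x03f4) but not RKVENC_CLASS_BASE
 * (0x0000..0x0058).
 */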
static bool req_over_class(struct mpp_request *req,
			   struct rkvenc_task *task, int class)
{
	u32 base_s, base_e, req_e;
	struct rkvenc_hw_info *hw = task->hw_info;

	base_s = hw->reg_msg[class].base_s;
	base_e = hw->reg_msg[class].base_e;
	req_e = req->offset + req->size - sizeof(u32);

	return req->offset <= base_e && req_e >= base_s;
}

static int rkvenc_free_class_msg(struct rkvenc_task *task)
{
	u32 i;
	u32 reg_class = task->hw_info->reg_class;

	for (i = 0; i < reg_class; i++) {
		kfree(task->reg[i].data);
		task->reg[i].data = NULL;
		task->reg[i].size = 0;
	}

	return 0;
}

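/*
 * Lazily allocate the backing buffer for one register class. The
 * buffer spans the whole class range (base_e is the last register,
 * hence the + sizeof(u32)), so later requests against the same class
 * reuse it.
 */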
static int rkvenc_alloc_class_msg(struct rkvenc_task *task, int class)
{
	u32 *data;
	struct rkvenc_hw_info *hw = task->hw_info;

	if (!task->reg[class].data) {
		u32 base_s = hw->reg_msg[class].base_s;
		u32 base_e = hw->reg_msg[class].base_e;
		u32 class_size = base_e - base_s + sizeof(u32);

		data = kzalloc(class_size, GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		task->reg[class].data = data;
		task->reg[class].size = class_size;
	}

	return 0;
}

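/*
 * Clip a request window to the given class range and produce the
 * per-class sub-request. E.g. a request covering 0x0000..0x0300
 * clipped against RKVENC_CLASS_PIC (0x0280..0x03f4) yields offset
 * 0x0280 and size 0x84, with data pointing 0x280 bytes into the
 * original payload.
 */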
static int rkvenc_update_req(struct rkvenc_task *task, int class,
			     struct mpp_request *req_in,
			     struct mpp_request *req_out)
{
	u32 base_s, base_e, req_e, s, e;
	struct rkvenc_hw_info *hw = task->hw_info;

	base_s = hw->reg_msg[class].base_s;
	base_e = hw->reg_msg[class].base_e;
	req_e = req_in->offset + req_in->size - sizeof(u32);
	s = max(req_in->offset, base_s);
	e = min(req_e, base_e);

	req_out->offset = s;
	req_out->size = e - s + sizeof(u32);
	req_out->data = (u8 *)req_in->data + (s - req_in->offset);

	return 0;
}

static int rkvenc_get_class_msg(struct rkvenc_task *task,
				u32 addr, struct mpp_request *msg)
{
	int i;
	bool found = false;
	u32 base_s, base_e;
	struct rkvenc_hw_info *hw = task->hw_info;

	if (!msg)
		return -EINVAL;

	memset(msg, 0, sizeof(*msg));
	for (i = 0; i < hw->reg_class; i++) {
		base_s = hw->reg_msg[i].base_s;
		base_e = hw->reg_msg[i].base_e;
		if (addr >= base_s && addr < base_e) {
			found = true;
			msg->offset = base_s;
			msg->size = task->reg[i].size;
			msg->data = task->reg[i].data;
			break;
		}
	}

	return found ? 0 : -EINVAL;
}

static u32 *rkvenc_get_class_reg(struct rkvenc_task *task, u32 addr)
{
	int i;
	u8 *reg = NULL;
	u32 base_s, base_e;
	struct rkvenc_hw_info *hw = task->hw_info;

	for (i = 0; i < hw->reg_class; i++) {
		base_s = hw->reg_msg[i].base_s;
		base_e = hw->reg_msg[i].base_e;
		if (addr >= base_s && addr < base_e) {
			reg = (u8 *)task->reg[i].data + (addr - base_s);
			break;
		}
	}

	return (u32 *)reg;
}

static int rkvenc2_extract_rcb_info(struct rkvenc2_rcb_info *rcb_inf,
				    struct mpp_request *req)
{
	int max_size = ARRAY_SIZE(rcb_inf->elem);
	int cnt = req->size / sizeof(rcb_inf->elem[0]);

	if (req->size > sizeof(rcb_inf->elem)) {
		mpp_err("count %d, max_size %d\n", cnt, max_size);
		return -EINVAL;
	}
	if (copy_from_user(rcb_inf->elem, req->data, req->size)) {
		mpp_err("copy_from_user failed\n");
		return -EINVAL;
	}
	rcb_inf->cnt = cnt;

	return 0;
}

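/*
 * Walk the ioctl message list once and sort each request into the
 * per-class task state: register writes are copied from userspace into
 * the class buffers, register reads are only recorded (they are
 * serviced after the hardware finishes), and address offsets and RCB
 * info are stashed for later fixup.
 */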
static int rkvenc_extract_task_msg(struct mpp_session *session,
				   struct rkvenc_task *task,
				   struct mpp_task_msgs *msgs)
{
	int ret;
	u32 i, j;
	struct mpp_request *req;
	struct rkvenc_hw_info *hw = task->hw_info;

	mpp_debug_enter();

	for (i = 0; i < msgs->req_cnt; i++) {
		req = &msgs->reqs[i];
		if (!req->size)
			continue;

		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			void *data;
			struct mpp_request *wreq;

			for (j = 0; j < hw->reg_class; j++) {
				if (!req_over_class(req, task, j))
					continue;

				ret = rkvenc_alloc_class_msg(task, j);
				if (ret) {
					mpp_err("alloc class msg %d fail.\n", j);
					goto fail;
				}
				wreq = &task->w_reqs[task->w_req_cnt];
				rkvenc_update_req(task, j, req, wreq);
				data = rkvenc_get_class_reg(task, wreq->offset);
				if (!data) {
					ret = -EINVAL;
					goto fail;
				}
				if (copy_from_user(data, wreq->data, wreq->size)) {
					mpp_err("copy_from_user fail, offset %08x\n", wreq->offset);
					ret = -EIO;
					goto fail;
				}
				task->reg[j].valid = 1;
				task->w_req_cnt++;
			}
		} break;
		case MPP_CMD_SET_REG_READ: {
			struct mpp_request *rreq;

			for (j = 0; j < hw->reg_class; j++) {
				if (!req_over_class(req, task, j))
					continue;

				ret = rkvenc_alloc_class_msg(task, j);
				if (ret) {
					mpp_err("alloc class msg reg %d fail.\n", j);
					goto fail;
				}
				rreq = &task->r_reqs[task->r_req_cnt];
				rkvenc_update_req(task, j, req, rreq);
				task->reg[j].valid = 1;
				task->r_req_cnt++;
			}
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		case MPP_CMD_SET_RCB_INFO: {
			struct rkvenc2_session_priv *priv = session->priv;

			if (priv)
				rkvenc2_extract_rcb_info(&priv->rcb_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt=%d, r_req_cnt=%d\n",
		  task->w_req_cnt, task->r_req_cnt);

	mpp_debug_leave();
	return 0;

fail:
	rkvenc_free_class_msg(task);

	mpp_debug_leave();
	return ret;
}

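/*
 * Read the coding-format bitfield (hw->fmt_reg; register 0x0300, bit 0
 * on this hardware) out of the already-copied class buffer. The result
 * selects the fd translation table for the task.
 */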
static int rkvenc_task_get_format(struct mpp_dev *mpp,
				  struct rkvenc_task *task)
{
	u32 offset, val;
	struct rkvenc_hw_info *hw = task->hw_info;
	u32 class = hw->fmt_reg.class;
	u32 *class_reg = task->reg[class].data;
	u32 class_size = task->reg[class].size;
	u32 class_base = hw->reg_msg[class].base_s;
	u32 bitpos = hw->fmt_reg.bitpos;
	u32 bitlen = hw->fmt_reg.bitlen;

	if (!class_reg || !class_size)
		return -EINVAL;

	offset = hw->fmt_reg.base - class_base;
	val = class_reg[offset / sizeof(u32)];
	task->fmt = (val >> bitpos) & ((1 << bitlen) - 1);

	return 0;
}

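/*
 * Pick a core for the next task: skip cores whose work is disabled,
 * prefer the core with fewer queued tasks, and otherwise fall back to
 * the core that has completed fewer tasks. Defaults to the first core
 * on the list.
 */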
static struct rkvenc_dev *rkvenc_core_balance(struct rkvenc_ccu *ccu)
{
	struct rkvenc_dev *enc;
	struct rkvenc_dev *core = NULL, *n;

	mpp_debug_enter();

	mutex_lock(&ccu->lock);
	enc = list_first_entry(&ccu->core_list, struct rkvenc_dev, core_link);
	list_for_each_entry_safe(core, n, &ccu->core_list, core_link) {
		mpp_debug(DEBUG_DEVICE, "%s, disable_work=%d, task_count=%d, task_index=%d\n",
			  dev_name(core->mpp.dev), core->disable_work,
			  atomic_read(&core->mpp.task_count), atomic_read(&core->mpp.task_index));
		/* if a core (except the main-core) is disabled, skip it */
		if (core->disable_work)
			continue;
		/* choose the core with fewer tasks in its queue */
		if (atomic_read(&core->mpp.task_count) < atomic_read(&enc->mpp.task_count)) {
			enc = core;
			break;
		}
		/* otherwise choose the core with fewer tasks done */
		if (atomic_read(&core->mpp.task_index) < atomic_read(&enc->mpp.task_index))
			enc = core;
	}
	mutex_unlock(&ccu->lock);

	mpp_debug_leave();

	return enc;
}

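/*
 * Patch the RCB registers the session asked for so that they point
 * into the reserved SRAM window instead of DDR. Entries that would run
 * past the reserved region are left untouched and keep their
 * userspace-provided DDR addresses.
 */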
static int rkvenc2_set_rcbbuf(struct mpp_dev *mpp, struct mpp_session *session,
			      struct rkvenc_task *task)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	struct rkvenc2_session_priv *priv = session->priv;
	u32 sram_enabled = 0;

	mpp_debug_enter();

	if (priv && enc->sram_iova) {
		int i;
		u32 *reg;
		u32 reg_idx, rcb_size, rcb_offset;
		struct rkvenc2_rcb_info *rcb_inf = &priv->rcb_inf;

		rcb_offset = 0;
		for (i = 0; i < rcb_inf->cnt; i++) {
			reg_idx = rcb_inf->elem[i].index;
			rcb_size = rcb_inf->elem[i].size;

			if (rcb_offset > enc->sram_size ||
			    (rcb_offset + rcb_size) > enc->sram_used)
				continue;

			mpp_debug(DEBUG_SRAM_INFO, "rcb: reg %d offset %d, size %d\n",
				  reg_idx, rcb_offset, rcb_size);

			reg = rkvenc_get_class_reg(task, reg_idx * sizeof(u32));
			if (reg)
				*reg = enc->sram_iova + rcb_offset;

			rcb_offset += rcb_size;
			sram_enabled = 1;
		}
	}
	if (enc->sram_enabled != sram_enabled) {
		mpp_debug(DEBUG_SRAM_INFO, "sram %s\n", sram_enabled ? "enabled" : "disabled");
		enc->sram_enabled = sram_enabled;
	}

	mpp_debug_leave();

	return 0;
}

static void *rkvenc_alloc_task(struct mpp_session *session,
			       struct mpp_task_msgs *msgs)
{
	int ret;
	struct rkvenc_task *task;
	struct mpp_task *mpp_task;
	struct mpp_dev *mpp = session->mpp;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	mpp_task->hw_info = mpp->var->hw_info;
	task->hw_info = to_rkvenc_info(mpp_task->hw_info);
	/* extract reqs for current task */
	ret = rkvenc_extract_task_msg(session, task, msgs);
	if (ret)
		goto free_task;
	mpp_task->reg = task->reg[0].data;
	/* get format */
	ret = rkvenc_task_get_format(mpp, task);
	if (ret)
		goto free_task;
	/* process fds in the registers */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		u32 i, j;
		int cnt;
		u32 off;
		const u16 *tbl;
		struct rkvenc_hw_info *hw = task->hw_info;

		for (i = 0; i < hw->fd_class; i++) {
			u32 class = hw->fd_reg[i].class;
			u32 fmt = hw->fd_reg[i].base_fmt + task->fmt;
			u32 *reg = task->reg[class].data;
			u32 ss = hw->reg_msg[class].base_s / sizeof(u32);

			if (!reg)
				continue;

			ret = mpp_translate_reg_address(session, mpp_task, fmt, reg, NULL);
			if (ret)
				goto fail;

			cnt = mpp->var->trans_info[fmt].count;
			tbl = mpp->var->trans_info[fmt].table;
			for (j = 0; j < cnt; j++) {
				off = mpp_query_reg_offset_info(&task->off_inf, tbl[j] + ss);
				mpp_debug(DEBUG_IOMMU, "reg[%d] + offset %d\n", tbl[j] + ss, off);
				reg[tbl[j]] += off;
			}
		}
	}
	rkvenc2_set_rcbbuf(mpp, session, task);
	task->clk_mode = CLK_MODE_NORMAL;

	mpp_debug_leave();

	return mpp_task;

fail:
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	/* free class register buffers */
	rkvenc_free_class_msg(task);
free_task:
	kfree(task);

	return NULL;
}

static void *rkvenc_ccu_alloc_task(struct mpp_session *session,
				   struct mpp_task_msgs *msgs)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(session->mpp);

	/* if multi-core, choose one core for the current task */
	if (enc->ccu) {
		enc = rkvenc_core_balance(enc->ccu);
		session->mpp = &enc->mpp;
	}

	return rkvenc_alloc_task(session, msgs);
}

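/*
 * Claim an idle core for the task under queue->running_lock. The queue
 * keeps a core_idle bitmap; find_first_bit() returns queue->core_count
 * when every core is busy, in which case NULL is returned and the task
 * stays queued.
 */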
static void *rkvenc2_prepare(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
	struct mpp_taskqueue *queue = mpp->queue;
	unsigned long flags;
	s32 core_id;

	spin_lock_irqsave(&queue->running_lock, flags);

	core_id = find_first_bit(&queue->core_idle, queue->core_count);

	if (core_id >= queue->core_count) {
		mpp_task = NULL;
		mpp_dbg_core("core %d all busy %lx\n", core_id, queue->core_idle);
	} else {
		mpp_dbg_core("core %d set idle %lx\n", core_id, queue->core_idle);

		clear_bit(core_id, &queue->core_idle);
		mpp_task->mpp = queue->cores[core_id];
		mpp_task->core_id = core_id;
	}

	spin_unlock_irqrestore(&queue->running_lock, flags);

	return mpp_task;
}

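/*
 * Write all staged class registers to the hardware. The start trigger
 * (enc_start_base) is skipped during the sweep and written last, after
 * a TLB flush and a write barrier, so the encoder only kicks off once
 * the whole configuration is in place.
 */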
static int rkvenc_run(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
	u32 i, j;
	u32 start_val = 0;
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_debug_enter();

	/* clear hardware counter */
	mpp_write_relaxed(mpp, 0x5300, 0x2);

	for (i = 0; i < task->w_req_cnt; i++) {
		int ret;
		u32 s, e, off;
		u32 *regs;
		struct mpp_request msg;
		struct mpp_request *req = &task->w_reqs[i];

		ret = rkvenc_get_class_msg(task, req->offset, &msg);
		if (ret)
			return -EINVAL;

		s = (req->offset - msg.offset) / sizeof(u32);
		e = s + req->size / sizeof(u32);
		regs = (u32 *)msg.data;
		for (j = s; j < e; j++) {
			off = msg.offset + j * sizeof(u32);
			if (off == enc->hw_info->enc_start_base) {
				start_val = regs[j];
				continue;
			}
			mpp_write_relaxed(mpp, off, regs[j]);
		}
	}

	if (mpp_debug_unlikely(DEBUG_CORE))
		dev_info(mpp->dev, "reg[%03x] %08x\n", 0x304,
			 mpp_read_relaxed(mpp, 0x304));

	/* flush tlb before starting hardware */
	mpp_iommu_flush_tlb(mpp->iommu_info);

	/* init current task */
	mpp->cur_task = mpp_task;

	/* flush the registers before starting the device */
	wmb();
	mpp_write(mpp, enc->hw_info->enc_start_base, start_val);

	mpp_debug_leave();

	return 0;
}

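/*
 * Top half of the interrupt handler: latch and acknowledge the
 * interrupt. Writing 0x100 to the mask register here presumably gates
 * further encoder interrupts until the threaded handler has run; the
 * latched status is handed to rkvenc_isr through mpp->irq_status.
 */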
static int rkvenc_irq(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	struct rkvenc_hw_info *hw = enc->hw_info;

	mpp_debug_enter();

	mpp->irq_status = mpp_read(mpp, hw->int_sta_base);
	if (!mpp->irq_status)
		return IRQ_NONE;

	mpp_write(mpp, hw->int_mask_base, 0x100);
	mpp_write(mpp, hw->int_clr_base, 0xffffffff);
	udelay(5);
	mpp_write(mpp, hw->int_sta_base, 0);

	mpp_debug_leave();

	return IRQ_WAKE_THREAD;
}

static int rkvenc_isr(struct mpp_dev *mpp)
{
	struct rkvenc_task *task;
	struct mpp_task *mpp_task;
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	struct mpp_taskqueue *queue = mpp->queue;

	mpp_debug_enter();

	/* FIXME use a spin lock here */
	if (!mpp->cur_task) {
		dev_err(mpp->dev, "no current task\n");
		return IRQ_HANDLED;
	}

	mpp_task = mpp->cur_task;
	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;

	if (mpp_task->mpp && mpp_task->mpp != mpp)
		dev_err(mpp->dev, "mismatch core dev %p:%p\n", mpp_task->mpp, mpp);

	task = to_rkvenc_task(mpp_task);
	task->irq_status = mpp->irq_status;

	mpp_debug(DEBUG_IRQ_STATUS, "%s irq_status: %08x\n",
		  dev_name(mpp->dev), task->irq_status);

	if (task->irq_status & enc->hw_info->err_mask) {
		atomic_inc(&mpp->reset_request);
		/* dump registers */
		if (mpp_debug_unlikely(DEBUG_DUMP_ERR_REG))
			mpp_task_dump_hw_reg(mpp, mpp_task);
	}
	mpp_task_finish(mpp_task->session, mpp_task);

	set_bit(mpp->core_id, &queue->core_idle);
	mpp_dbg_core("core %d isr idle %lx\n", mpp->core_id, queue->core_idle);

	mpp_debug_leave();

	return IRQ_HANDLED;
}

static int rkvenc_finish(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
	u32 i, j;
	u32 *reg;
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_debug_enter();

	for (i = 0; i < task->r_req_cnt; i++) {
		int ret;
		u32 s, e;
		struct mpp_request msg;
		struct mpp_request *req = &task->r_reqs[i];

		ret = rkvenc_get_class_msg(task, req->offset, &msg);
		if (ret)
			return -EINVAL;
		s = (req->offset - msg.offset) / sizeof(u32);
		e = s + req->size / sizeof(u32);
		reg = (u32 *)msg.data;
		for (j = s; j < e; j++)
			reg[j] = mpp_read_relaxed(mpp, msg.offset + j * sizeof(u32));
	}
	/* revert the hack for the irq status */
	reg = rkvenc_get_class_reg(task, task->hw_info->int_sta_base);
	if (reg)
		*reg = task->irq_status;

	mpp_debug_leave();

	return 0;
}

static int rkvenc_result(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task,
			 struct mpp_task_msgs *msgs)
{
	u32 i;
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_debug_enter();

	for (i = 0; i < task->r_req_cnt; i++) {
		struct mpp_request *req = &task->r_reqs[i];
		u32 *reg = rkvenc_get_class_reg(task, req->offset);

		if (!reg)
			return -EINVAL;
		if (copy_to_user(req->data, reg, req->size)) {
			mpp_err("copy_to_user reg fail\n");
			return -EIO;
		}
	}

	mpp_debug_leave();

	return 0;
}

static int rkvenc_free_task(struct mpp_session *session,
			    struct mpp_task *mpp_task)
{
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_task_finalize(session, mpp_task);
	rkvenc_free_class_msg(task);
	kfree(task);

	return 0;
}

static int rkvenc_control(struct mpp_session *session, struct mpp_request *req)
{
	switch (req->cmd) {
	case MPP_CMD_SEND_CODEC_INFO: {
		int i;
		int cnt;
		struct codec_info_elem elem;
		struct rkvenc2_session_priv *priv;

		if (!session || !session->priv) {
			mpp_err("session info null\n");
			return -EINVAL;
		}
		priv = session->priv;

		cnt = req->size / sizeof(elem);
		cnt = (cnt > ENC_INFO_BUTT) ? ENC_INFO_BUTT : cnt;
		mpp_debug(DEBUG_IOCTL, "codec info count %d\n", cnt);
		for (i = 0; i < cnt; i++) {
			if (copy_from_user(&elem, req->data + i * sizeof(elem), sizeof(elem))) {
				mpp_err("copy_from_user failed\n");
				continue;
			}
			if (elem.type > ENC_INFO_BASE && elem.type < ENC_INFO_BUTT &&
			    elem.flag > CODEC_INFO_FLAG_NULL && elem.flag < CODEC_INFO_FLAG_BUTT) {
				elem.type = array_index_nospec(elem.type, ENC_INFO_BUTT);
				priv->codec_info[elem.type].flag = elem.flag;
				priv->codec_info[elem.type].val = elem.data;
			} else {
				mpp_err("codec info invalid, type %d, flag %d\n",
					elem.type, elem.flag);
			}
		}
	} break;
	default: {
		mpp_err("unknown mpp ioctl cmd %x\n", req->cmd);
	} break;
	}

	return 0;
}

static int rkvenc_free_session(struct mpp_session *session)
{
	if (session && session->priv) {
		kfree(session->priv);
		session->priv = NULL;
	}

	return 0;
}

static int rkvenc_init_session(struct mpp_session *session)
{
	struct rkvenc2_session_priv *priv;

	if (!session) {
		mpp_err("session is null\n");
		return -EINVAL;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	init_rwsem(&priv->rw_sem);
	session->priv = priv;

	return 0;
}

#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
static int rkvenc_procfs_remove(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	if (enc->procfs) {
		proc_remove(enc->procfs);
		enc->procfs = NULL;
	}

	return 0;
}

static int rkvenc_dump_session(struct mpp_session *session, struct seq_file *seq)
{
	int i;
	struct rkvenc2_session_priv *priv = session->priv;

	down_read(&priv->rw_sem);
	/* item name */
	seq_puts(seq, "------------------------------------------------------");
	seq_puts(seq, "------------------------------------------------------\n");
	seq_printf(seq, "|%8s|", (const char *)"session");
	seq_printf(seq, "%8s|", (const char *)"device");
	for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) {
		bool show = priv->codec_info[i].flag;

		if (show)
			seq_printf(seq, "%8s|", enc_info_item_name[i]);
	}
	seq_puts(seq, "\n");
	/* item data */
	seq_printf(seq, "|%8p|", session);
	seq_printf(seq, "%8s|", mpp_device_name[session->device_type]);
	for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) {
		u32 flag = priv->codec_info[i].flag;

		if (!flag)
			continue;
		if (flag == CODEC_INFO_FLAG_NUMBER) {
			u32 data = priv->codec_info[i].val;

			seq_printf(seq, "%8d|", data);
		} else if (flag == CODEC_INFO_FLAG_STRING) {
			const char *name = (const char *)&priv->codec_info[i].val;

			seq_printf(seq, "%8s|", name);
		} else {
			seq_printf(seq, "%8s|", (const char *)"null");
		}
	}
	seq_puts(seq, "\n");
	up_read(&priv->rw_sem);

	return 0;
}

static int rkvenc_show_session_info(struct seq_file *seq, void *offset)
{
	struct mpp_session *session = NULL, *n;
	struct mpp_dev *mpp = seq->private;

	mutex_lock(&mpp->srv->session_lock);
	list_for_each_entry_safe(session, n,
				 &mpp->srv->session_list,
				 session_link) {
		if (session->device_type != MPP_DEVICE_RKVENC)
			continue;
		if (!session->priv)
			continue;
		if (mpp->dev_ops->dump_session)
			mpp->dev_ops->dump_session(session, seq);
	}
	mutex_unlock(&mpp->srv->session_lock);

	return 0;
}

static int rkvenc_procfs_init(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	char name[32];

	if (!mpp->dev || !mpp->dev->of_node || !mpp->dev->of_node->name ||
	    !mpp->srv || !mpp->srv->procfs)
		return -EINVAL;

	snprintf(name, sizeof(name) - 1, "%s%d",
		 mpp->dev->of_node->name, mpp->core_id);

	enc->procfs = proc_mkdir(name, mpp->srv->procfs);
	if (IS_ERR_OR_NULL(enc->procfs)) {
		mpp_err("failed to create procfs dir\n");
		enc->procfs = NULL;
		return -EIO;
	}
	/* for debug */
	mpp_procfs_create_u32("aclk", 0644,
			      enc->procfs, &enc->aclk_info.debug_rate_hz);
	mpp_procfs_create_u32("clk_core", 0644,
			      enc->procfs, &enc->core_clk_info.debug_rate_hz);
	mpp_procfs_create_u32("session_buffers", 0644,
			      enc->procfs, &mpp->session_max_buffers);
	/* for showing session info */
	proc_create_single_data("sessions-info", 0444,
				enc->procfs, rkvenc_show_session_info, mpp);

	return 0;
}

static int rkvenc_procfs_ccu_init(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	if (!enc->procfs)
		goto done;

	mpp_procfs_create_u32("disable_work", 0644,
			      enc->procfs, &enc->disable_work);
done:
	return 0;
}
#else
static inline int rkvenc_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int rkvenc_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}

static inline int rkvenc_procfs_ccu_init(struct mpp_dev *mpp)
{
	return 0;
}
#endif

static int rkvenc_init(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	int ret = 0;

	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_RKVENC];

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &enc->aclk_info, "aclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get aclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &enc->hclk_info, "hclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get hclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &enc->core_clk_info, "clk_core");
	if (ret)
		mpp_err("failed on clk_get clk_core\n");
	/* Get normal max workload from dtsi */
	of_property_read_u32(mpp->dev->of_node,
			     "rockchip,default-max-load",
			     &enc->default_max_load);
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&enc->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);
	mpp_set_clk_info_rate_hz(&enc->core_clk_info, CLK_MODE_DEFAULT, 600 * MHZ);

	/* Get reset controls from dtsi */
	enc->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
	if (!enc->rst_a)
		mpp_err("No aclk reset resource defined\n");
	enc->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
	if (!enc->rst_h)
		mpp_err("No hclk reset resource defined\n");
	enc->rst_core = mpp_reset_control_get(mpp, RST_TYPE_CORE, "video_core");
	if (!enc->rst_core)
		mpp_err("No core reset resource defined\n");

	return 0;
}

static int rkvenc_reset(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	struct rkvenc_hw_info *hw = enc->hw_info;
	struct mpp_taskqueue *queue = mpp->queue;

	mpp_debug_enter();

	/* safe reset */
	mpp_write(mpp, hw->int_mask_base, 0x3FF);
	mpp_write(mpp, hw->enc_clr_base, 0x1);
	udelay(5);
	mpp_write(mpp, hw->int_clr_base, 0xffffffff);
	mpp_write(mpp, hw->int_sta_base, 0);

	/* cru reset */
	if (enc->rst_a && enc->rst_h && enc->rst_core) {
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(enc->rst_a);
		mpp_safe_reset(enc->rst_h);
		mpp_safe_reset(enc->rst_core);
		udelay(5);
		mpp_safe_unreset(enc->rst_a);
		mpp_safe_unreset(enc->rst_h);
		mpp_safe_unreset(enc->rst_core);
		mpp_pmu_idle_request(mpp, false);
	}

	set_bit(mpp->core_id, &queue->core_idle);
	mpp_dbg_core("core %d reset idle %lx\n", mpp->core_id, queue->core_idle);

	mpp_debug_leave();

	return 0;
}

static int rkvenc_clk_on(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	mpp_clk_safe_enable(enc->aclk_info.clk);
	mpp_clk_safe_enable(enc->hclk_info.clk);
	mpp_clk_safe_enable(enc->core_clk_info.clk);

	return 0;
}

static int rkvenc_clk_off(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	clk_disable_unprepare(enc->aclk_info.clk);
	clk_disable_unprepare(enc->hclk_info.clk);
	clk_disable_unprepare(enc->core_clk_info.clk);

	return 0;
}

static int rkvenc_set_freq(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_clk_set_rate(&enc->aclk_info, task->clk_mode);
	mpp_clk_set_rate(&enc->core_clk_info, task->clk_mode);

	return 0;
}

static struct mpp_hw_ops rkvenc_hw_ops = {
	.init = rkvenc_init,
	.clk_on = rkvenc_clk_on,
	.clk_off = rkvenc_clk_off,
	.set_freq = rkvenc_set_freq,
	.reset = rkvenc_reset,
};

static struct mpp_dev_ops rkvenc_dev_ops_v2 = {
	.alloc_task = rkvenc_alloc_task,
	.run = rkvenc_run,
	.irq = rkvenc_irq,
	.isr = rkvenc_isr,
	.finish = rkvenc_finish,
	.result = rkvenc_result,
	.free_task = rkvenc_free_task,
	.ioctl = rkvenc_control,
	.init_session = rkvenc_init_session,
	.free_session = rkvenc_free_session,
	.dump_session = rkvenc_dump_session,
};

static struct mpp_dev_ops rkvenc_ccu_dev_ops = {
	.alloc_task = rkvenc_ccu_alloc_task,
	.prepare = rkvenc2_prepare,
	.run = rkvenc_run,
	.irq = rkvenc_irq,
	.isr = rkvenc_isr,
	.finish = rkvenc_finish,
	.result = rkvenc_result,
	.free_task = rkvenc_free_task,
	.ioctl = rkvenc_control,
	.init_session = rkvenc_init_session,
	.free_session = rkvenc_free_session,
	.dump_session = rkvenc_dump_session,
};

static const struct mpp_dev_var rkvenc_v2_data = {
	.device_type = MPP_DEVICE_RKVENC,
	.hw_info = &rkvenc_v2_hw_info.hw,
	.trans_info = trans_rkvenc_v2,
	.hw_ops = &rkvenc_hw_ops,
	.dev_ops = &rkvenc_dev_ops_v2,
};

static const struct mpp_dev_var rkvenc_ccu_data = {
	.device_type = MPP_DEVICE_RKVENC,
	.hw_info = &rkvenc_v2_hw_info.hw,
	.trans_info = trans_rkvenc_v2,
	.hw_ops = &rkvenc_hw_ops,
	.dev_ops = &rkvenc_ccu_dev_ops,
};

static const struct of_device_id mpp_rkvenc_dt_match[] = {
	{
		.compatible = "rockchip,rkv-encoder-v2",
		.data = &rkvenc_v2_data,
	},
#ifdef CONFIG_CPU_RK3588
	{
		.compatible = "rockchip,rkv-encoder-v2-core",
		.data = &rkvenc_ccu_data,
	},
	{
		.compatible = "rockchip,rkv-encoder-v2-ccu",
	},
#endif
	{},
};

static int rkvenc_ccu_probe(struct platform_device *pdev)
{
	struct rkvenc_ccu *ccu;
	struct device *dev = &pdev->dev;

	ccu = devm_kzalloc(dev, sizeof(*ccu), GFP_KERNEL);
	if (!ccu)
		return -ENOMEM;

	platform_set_drvdata(pdev, ccu);

	mutex_init(&ccu->lock);
	INIT_LIST_HEAD(&ccu->core_list);

	return 0;
}

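/*
 * Register a core with the shared ccu. The first core to attach
 * becomes the main core, and every later core is switched over to the
 * main core's iommu domain so that all cores share one mapping.
 */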
static int rkvenc_attach_ccu(struct device *dev, struct rkvenc_dev *enc)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct rkvenc_ccu *ccu;

	mpp_debug_enter();

	np = of_parse_phandle(dev->of_node, "rockchip,ccu", 0);
	if (!np || !of_device_is_available(np))
		return -ENODEV;

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev)
		return -ENODEV;

	ccu = platform_get_drvdata(pdev);
	if (!ccu)
		return -ENOMEM;

	INIT_LIST_HEAD(&enc->core_link);
	mutex_lock(&ccu->lock);
	ccu->core_num++;
	list_add_tail(&enc->core_link, &ccu->core_list);
	mutex_unlock(&ccu->lock);

	/* attach the ccu-domain to the current core */
	if (!ccu->main_core) {
		/*
		 * Set the first device as the main core;
		 * the domain of the main core is the ccu-domain.
		 */
		ccu->main_core = &enc->mpp;
	} else {
		struct mpp_iommu_info *ccu_info, *cur_info;

		/* set the ccu-domain for the current device */
		ccu_info = ccu->main_core->iommu_info;
		cur_info = enc->mpp.iommu_info;

		cur_info->domain = ccu_info->domain;
		cur_info->rw_sem = ccu_info->rw_sem;
		mpp_iommu_attach(cur_info);
	}
	enc->ccu = ccu;

	dev_info(dev, "attach ccu as core %d\n", enc->mpp.core_id);
	mpp_debug_leave();

	return 0;
}

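/*
 * Reserve the "rockchip,rcb-iova" window and back it with memory: the
 * first sram_size bytes map to the on-chip SRAM named by the
 * "rockchip,sram" phandle, and whatever remains of sram_used is backed
 * by ordinary pages, so the RCB window is always fully mapped.
 */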
static int rkvenc2_alloc_rcbbuf(struct platform_device *pdev, struct rkvenc_dev *enc)
{
	int ret;
	u32 vals[2];
	dma_addr_t iova;
	u32 sram_used, sram_size;
	struct device_node *sram_np;
	struct resource sram_res;
	resource_size_t sram_start, sram_end;
	struct iommu_domain *domain;
	struct device *dev = &pdev->dev;

	/* get rcb iova start and size */
	ret = device_property_read_u32_array(dev, "rockchip,rcb-iova", vals, 2);
	if (ret)
		return ret;

	iova = PAGE_ALIGN(vals[0]);
	sram_used = PAGE_ALIGN(vals[1]);
	if (!sram_used) {
		dev_err(dev, "sram rcb invalid.\n");
		return -EINVAL;
	}
	/* reserve the iova window for rcb */
	ret = iommu_dma_reserve_iova(dev, iova, sram_used);
	if (ret) {
		dev_err(dev, "alloc rcb iova error.\n");
		return ret;
	}
	/* get sram device node */
	sram_np = of_parse_phandle(dev->of_node, "rockchip,sram", 0);
	if (!sram_np) {
		dev_err(dev, "could not find phandle sram\n");
		return -ENODEV;
	}
	/* get sram start and size */
	ret = of_address_to_resource(sram_np, 0, &sram_res);
	of_node_put(sram_np);
	if (ret) {
		dev_err(dev, "find sram res error\n");
		return ret;
	}
	/* check that sram start and size are PAGE_SIZE aligned */
	sram_start = round_up(sram_res.start, PAGE_SIZE);
	sram_end = round_down(sram_res.start + resource_size(&sram_res), PAGE_SIZE);
	if (sram_end <= sram_start) {
		dev_err(dev, "no available sram, phy_start %pa, phy_end %pa\n",
			&sram_start, &sram_end);
		return -ENOMEM;
	}
	sram_size = sram_end - sram_start;
	sram_size = sram_used < sram_size ? sram_used : sram_size;
	/* map the iova to the sram */
	domain = enc->mpp.iommu_info->domain;
	ret = iommu_map(domain, iova, sram_start, sram_size, IOMMU_READ | IOMMU_WRITE);
	if (ret) {
		dev_err(dev, "sram iommu_map error.\n");
		return ret;
	}
	/* back the remaining part of the window with normal pages */
	if (sram_size < sram_used) {
		struct page *page;
		size_t page_size = PAGE_ALIGN(sram_used - sram_size);

		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(page_size));
		if (!page) {
			dev_err(dev, "unable to allocate pages\n");
			ret = -ENOMEM;
			goto err_sram_map;
		}
		/* map the iova to the pages */
		ret = iommu_map(domain, iova + sram_size, page_to_phys(page),
				page_size, IOMMU_READ | IOMMU_WRITE);
		if (ret) {
			dev_err(dev, "page iommu_map error.\n");
			__free_pages(page, get_order(page_size));
			goto err_sram_map;
		}
		enc->rcb_page = page;
	}

	enc->sram_size = sram_size;
	enc->sram_used = sram_used;
	enc->sram_iova = iova;
	enc->sram_enabled = -1;
	dev_info(dev, "sram_start %pa\n", &sram_start);
	dev_info(dev, "sram_iova %pad\n", &enc->sram_iova);
	dev_info(dev, "sram_size %u\n", enc->sram_size);
	dev_info(dev, "sram_used %u\n", enc->sram_used);

	return 0;

err_sram_map:
	iommu_unmap(domain, iova, sram_size);

	return ret;
}

static int rkvenc_core_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct device *dev = &pdev->dev;
	struct rkvenc_dev *enc = NULL;
	struct mpp_dev *mpp = NULL;

	enc = devm_kzalloc(dev, sizeof(*enc), GFP_KERNEL);
	if (!enc)
		return -ENOMEM;

	mpp = &enc->mpp;
	platform_set_drvdata(pdev, enc);

	if (pdev->dev.of_node) {
		struct device_node *np = pdev->dev.of_node;
		const struct of_device_id *match = NULL;

		match = of_match_node(mpp_rkvenc_dt_match, np);
		if (match)
			mpp->var = (struct mpp_dev_var *)match->data;

		mpp->core_id = of_alias_get_id(np, "rkvenc");
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret)
		return ret;

	rkvenc2_alloc_rcbbuf(pdev, enc);

	/* attach core to ccu */
	ret = rkvenc_attach_ccu(dev, enc);
	if (ret) {
		dev_err(dev, "attach ccu failed\n");
		return ret;
	}

	ret = devm_request_threaded_irq(dev, mpp->irq,
					mpp_dev_irq,
					mpp_dev_isr_sched,
					IRQF_SHARED,
					dev_name(dev), mpp);
	if (ret) {
		dev_err(dev, "register interrupt handler failed\n");
		return -EINVAL;
	}
	mpp->session_max_buffers = RKVENC_SESSION_MAX_BUFFERS;
	enc->hw_info = to_rkvenc_info(mpp->var->hw_info);
	rkvenc_procfs_init(mpp);
	rkvenc_procfs_ccu_init(mpp);

	/* if this is the main core, register the device with the mpp service */
	if (mpp == enc->ccu->main_core)
		mpp_dev_register_srv(mpp, mpp->srv);

	return 0;
}

static int rkvenc_probe_default(struct platform_device *pdev)
{
	int ret = 0;
	struct device *dev = &pdev->dev;
	struct rkvenc_dev *enc = NULL;
	struct mpp_dev *mpp = NULL;
	const struct of_device_id *match = NULL;

	enc = devm_kzalloc(dev, sizeof(*enc), GFP_KERNEL);
	if (!enc)
		return -ENOMEM;

	mpp = &enc->mpp;
	platform_set_drvdata(pdev, enc);

	if (pdev->dev.of_node) {
		match = of_match_node(mpp_rkvenc_dt_match, pdev->dev.of_node);
		if (match)
			mpp->var = (struct mpp_dev_var *)match->data;
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret)
		return ret;

	rkvenc2_alloc_rcbbuf(pdev, enc);

	ret = devm_request_threaded_irq(dev, mpp->irq,
					mpp_dev_irq,
					mpp_dev_isr_sched,
					IRQF_SHARED,
					dev_name(dev), mpp);
	if (ret) {
		dev_err(dev, "register interrupt handler failed\n");
		goto failed_get_irq;
	}
	mpp->session_max_buffers = RKVENC_SESSION_MAX_BUFFERS;
	enc->hw_info = to_rkvenc_info(mpp->var->hw_info);
	rkvenc_procfs_init(mpp);
	mpp_dev_register_srv(mpp, mpp->srv);

	return 0;

failed_get_irq:
	mpp_dev_remove(mpp);

	return ret;
}

static int rkvenc_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	dev_info(dev, "probing start\n");

	if (strstr(np->name, "ccu"))
		ret = rkvenc_ccu_probe(pdev);
	else if (strstr(np->name, "core"))
		ret = rkvenc_core_probe(pdev);
	else
		ret = rkvenc_probe_default(pdev);

	dev_info(dev, "probing finish\n");

	return ret;
}

static int rkvenc2_free_rcbbuf(struct platform_device *pdev, struct rkvenc_dev *enc)
{
	struct iommu_domain *domain;

	if (enc->rcb_page) {
		size_t page_size = PAGE_ALIGN(enc->sram_used - enc->sram_size);

		__free_pages(enc->rcb_page, get_order(page_size));
	}
	if (enc->sram_iova) {
		domain = enc->mpp.iommu_info->domain;
		iommu_unmap(domain, enc->sram_iova, enc->sram_used);
	}

	return 0;
}

static int rkvenc_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	if (strstr(np->name, "ccu")) {
		dev_info(dev, "remove ccu\n");
	} else if (strstr(np->name, "core")) {
		struct rkvenc_dev *enc = platform_get_drvdata(pdev);

		dev_info(dev, "remove core\n");
		if (enc->ccu) {
			mutex_lock(&enc->ccu->lock);
			list_del_init(&enc->core_link);
			enc->ccu->core_num--;
			mutex_unlock(&enc->ccu->lock);
		}
		rkvenc2_free_rcbbuf(pdev, enc);
		mpp_dev_remove(&enc->mpp);
		rkvenc_procfs_remove(&enc->mpp);
	} else {
		struct rkvenc_dev *enc = platform_get_drvdata(pdev);

		dev_info(dev, "remove device\n");
		rkvenc2_free_rcbbuf(pdev, enc);
		mpp_dev_remove(&enc->mpp);
		rkvenc_procfs_remove(&enc->mpp);
	}

	return 0;
}

static void rkvenc_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	if (!strstr(dev_name(dev), "ccu")) {
		int ret;
		int val;
		struct rkvenc_dev *enc = platform_get_drvdata(pdev);
		struct mpp_dev *mpp = &enc->mpp;

		dev_info(dev, "shutdown device\n");

		if (mpp->srv)
			atomic_inc(&mpp->srv->shutdown_request);

		ret = readx_poll_timeout(atomic_read,
					 &mpp->task_count,
					 val, val == 0, 1000, 200000);
		if (ret == -ETIMEDOUT)
			dev_err(dev, "wait for running tasks timed out\n");
	}
	dev_info(dev, "shutdown success\n");
}

struct platform_driver rockchip_rkvenc2_driver = {
	.probe = rkvenc_probe,
	.remove = rkvenc_remove,
	.shutdown = rkvenc_shutdown,
	.driver = {
		.name = RKVENC_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_rkvenc_dt_match),
	},
};