// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2022 Rockchip Electronics Co., Ltd
 *
 * author:
 *    Ding Wei, leo.ding@rock-chips.com
 *
 */

#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/proc_fs.h>
#include <linux/pm_runtime.h>
#include <linux/nospec.h>
#include <linux/workqueue.h>
#include <linux/dma-iommu.h>
#include <soc/rockchip/pm_domains.h>
#include <soc/rockchip/rockchip_ipa.h>
#include <soc/rockchip/rockchip_opp_select.h>
#include <soc/rockchip/rockchip_system_monitor.h>

#include "mpp_debug.h"
#include "mpp_iommu.h"
#include "mpp_common.h"

#define RKVENC_DRIVER_NAME "mpp_rkvenc2"
#define RKVENC_SINGLE 0444
#define RKVENC_MPP_PROCF 0644

#define RKVENC_SESSION_MAX_BUFFERS 40
#define RKVENC_MAX_CORE_NUM 4

#define to_rkvenc_info(info) container_of(info, struct rkvenc_hw_info, hw)
#define to_rkvenc_task(ctx) container_of(ctx, struct rkvenc_task, mpp_task)
#define to_rkvenc_dev(dev) container_of(dev, struct rkvenc_dev, mpp)

enum RKVENC_FORMAT_TYPE {
    RKVENC_FMT_BASE = 0x0000,
    RKVENC_FMT_H264E = RKVENC_FMT_BASE + 0,
    RKVENC_FMT_H265E = RKVENC_FMT_BASE + 1,

    RKVENC_FMT_OSD_BASE = 0x1000,
    RKVENC_FMT_H264E_OSD = RKVENC_FMT_OSD_BASE + 0,
    RKVENC_FMT_H265E_OSD = RKVENC_FMT_OSD_BASE + 1,
    RKVENC_FMT_BUTT,
};

enum RKVENC_CLASS_TYPE {
    RKVENC_CLASS_BASE = 0,  /* base */
    RKVENC_CLASS_PIC = 1,   /* picture configure */
    RKVENC_CLASS_RC = 2,    /* rate control */
    RKVENC_CLASS_PAR = 3,   /* parameter */
    RKVENC_CLASS_SQI = 4,   /* subjective adjust */
    RKVENC_CLASS_SCL = 5,   /* scaling list */
    RKVENC_CLASS_OSD = 6,   /* osd */
    RKVENC_CLASS_ST = 7,    /* status */
    RKVENC_CLASS_DEBUG = 8, /* debug */
    RKVENC_CLASS_BUTT,
};

enum RKVENC_CLASS_FD_TYPE {
    RKVENC_CLASS_FD_BASE = 0, /* base */
    RKVENC_CLASS_FD_OSD = 1,  /* osd */
    RKVENC_CLASS_FD_BUTT,
};

struct rkvenc_reg_msg {
    u32 base_s;
    u32 base_e;
};

struct rkvenc_hw_info {
    struct mpp_hw_info hw;
    /* for register range check */
    u32 reg_class;
    struct rkvenc_reg_msg reg_msg[RKVENC_CLASS_BUTT];
    /* for fd translate */
    u32 fd_class;
    struct {
        u32 class;
        u32 base_fmt;
    } fd_reg[RKVENC_CLASS_FD_BUTT];
    /* for get format */
    struct {
        u32 class;
        u32 base;
        u32 bitpos;
        u32 bitlen;
    } fmt_reg;
    /* register info */
    u32 enc_start_base;
    u32 enc_clr_base;
    u32 int_en_base;
    u32 int_mask_base;
    u32 int_clr_base;
    u32 int_sta_base;
    u32 enc_wdg_base;
    u32 err_mask;
};

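/*
 * Per-task state. Register values coming from userspace are not kept in
 * one flat array: each register class gets its own lazily allocated
 * shadow buffer in reg[], sized to that class's window (see
 * rkvenc_alloc_class_msg()), and rkvenc_run() later replays the
 * buffered writes to the hardware.
 */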
struct rkvenc_task {
    struct mpp_task mpp_task;
    int fmt;
    struct rkvenc_hw_info *hw_info;

    /* class register */
    struct {
        u32 valid;
        u32 *data;
        u32 size;
    } reg[RKVENC_CLASS_BUTT];
    /* register offset info */
    struct reg_offset_info off_inf;

    enum MPP_CLOCK_MODE clk_mode;
    u32 irq_status;
    /* req for current task */
    u32 w_req_cnt;
    struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
    u32 r_req_cnt;
    struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
    struct mpp_dma_buffer *table;
    u32 task_no;
};

#define RKVENC_MAX_RCB_NUM (4)

struct rcb_info_elem {
    u32 index;
    u32 size;
};

struct rkvenc2_rcb_info {
    u32 cnt;
    struct rcb_info_elem elem[RKVENC_MAX_RCB_NUM];
};

struct rkvenc2_session_priv {
    struct rw_semaphore rw_sem;
    /* codec info from user */
    struct {
        /* show mode */
        u32 flag;
        /* item data */
        u64 val;
    } codec_info[ENC_INFO_BUTT];
    /* rcb_info for sram */
    struct rkvenc2_rcb_info rcb_inf;
};

struct rkvenc_dev {
    struct mpp_dev mpp;
    struct rkvenc_hw_info *hw_info;

    struct mpp_clk_info aclk_info;
    struct mpp_clk_info hclk_info;
    struct mpp_clk_info core_clk_info;
    u32 default_max_load;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
    struct proc_dir_entry *procfs;
#endif
    struct reset_control *rst_a;
    struct reset_control *rst_h;
    struct reset_control *rst_core;
    /* for ccu */
    struct rkvenc_ccu *ccu;
    struct list_head core_link;
    u32 disable_work;

    /* internal rcb-memory */
    u32 sram_size;
    u32 sram_used;
    dma_addr_t sram_iova;
    u32 sram_enabled;
    struct page *rcb_page;
};

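/*
 * Central control unit (ccu) shared by the encoder cores on multi-core
 * SoCs (the "rockchip,rkv-encoder-v2-ccu" compatible below). Cores add
 * themselves to core_list at probe time; the first one becomes
 * main_core and lends its iommu domain to the others, see
 * rkvenc_attach_ccu().
 */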
struct rkvenc_ccu {
    u32 core_num;
    /* lock for core attach */
    struct mutex lock;
    struct list_head core_list;
    struct mpp_dev *main_core;
};

static struct rkvenc_hw_info rkvenc_v2_hw_info = {
    .hw =
        {
            .reg_num = 254,
            .reg_id = 0,
            .reg_en = 4,
            .reg_start = 160,
            .reg_end = 253,
        },
    .reg_class = RKVENC_CLASS_BUTT,
    .reg_msg[RKVENC_CLASS_BASE] =
        {
            .base_s = 0x0000,
            .base_e = 0x0058,
        },
    .reg_msg[RKVENC_CLASS_PIC] =
        {
            .base_s = 0x0280,
            .base_e = 0x03f4,
        },
    .reg_msg[RKVENC_CLASS_RC] =
        {
            .base_s = 0x1000,
            .base_e = 0x10e0,
        },
    .reg_msg[RKVENC_CLASS_PAR] =
        {
            .base_s = 0x1700,
            .base_e = 0x1cd4,
        },
    .reg_msg[RKVENC_CLASS_SQI] =
        {
            .base_s = 0x2000,
            .base_e = 0x21e4,
        },
    .reg_msg[RKVENC_CLASS_SCL] =
        {
            .base_s = 0x2200,
            .base_e = 0x2c98,
        },
    .reg_msg[RKVENC_CLASS_OSD] =
        {
            .base_s = 0x3000,
            .base_e = 0x347c,
        },
    .reg_msg[RKVENC_CLASS_ST] =
        {
            .base_s = 0x4000,
            .base_e = 0x42cc,
        },
    .reg_msg[RKVENC_CLASS_DEBUG] =
        {
            .base_s = 0x5000,
            .base_e = 0x5354,
        },
    .fd_class = RKVENC_CLASS_FD_BUTT,
    .fd_reg[RKVENC_CLASS_FD_BASE] =
        {
            .class = RKVENC_CLASS_PIC,
            .base_fmt = RKVENC_FMT_BASE,
        },
    .fd_reg[RKVENC_CLASS_FD_OSD] =
        {
            .class = RKVENC_CLASS_OSD,
            .base_fmt = RKVENC_FMT_OSD_BASE,
        },
    .fmt_reg =
        {
            .class = RKVENC_CLASS_PIC,
            .base = 0x0300,
            .bitpos = 0,
            .bitlen = 1,
        },
    .enc_start_base = 0x0010,
    .enc_clr_base = 0x0014,
    .int_en_base = 0x0020,
    .int_mask_base = 0x0024,
    .int_clr_base = 0x0028,
    .int_sta_base = 0x002c,
    .enc_wdg_base = 0x0038,
    .err_mask = 0x03f0,
};

/*
 * file handle translate information for v2
 */
static const u16 trans_tbl_h264e_v2[] = {
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
};

static const u16 trans_tbl_h264e_v2_osd[] = {
    20, 21, 22, 23, 24, 25, 26, 27,
};

static const u16 trans_tbl_h265e_v2[] = {
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
};

static const u16 trans_tbl_h265e_v2_osd[] = {
    20, 21, 22, 23, 24, 25, 26, 27,
};

static struct mpp_trans_info trans_rkvenc_v2[] = {
    [RKVENC_FMT_H264E] =
        {
            .count = ARRAY_SIZE(trans_tbl_h264e_v2),
            .table = trans_tbl_h264e_v2,
        },
    [RKVENC_FMT_H264E_OSD] =
        {
            .count = ARRAY_SIZE(trans_tbl_h264e_v2_osd),
            .table = trans_tbl_h264e_v2_osd,
        },
    [RKVENC_FMT_H265E] =
        {
            .count = ARRAY_SIZE(trans_tbl_h265e_v2),
            .table = trans_tbl_h265e_v2,
        },
    [RKVENC_FMT_H265E_OSD] =
        {
            .count = ARRAY_SIZE(trans_tbl_h265e_v2_osd),
            .table = trans_tbl_h265e_v2_osd,
        },
};

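/*
 * Check whether a userspace request intersects one register class
 * window. Both the window [base_s, base_e] and the request
 * [offset, offset + size - 4] are inclusive byte ranges, so they
 * overlap exactly when each one starts no later than the other ends.
 */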
static bool req_over_class(struct mpp_request *req, struct rkvenc_task *task, int class)
{
    bool ret;
    u32 base_s, base_e, req_e;
    struct rkvenc_hw_info *hw = task->hw_info;

    base_s = hw->reg_msg[class].base_s;
    base_e = hw->reg_msg[class].base_e;
    req_e = req->offset + req->size - sizeof(u32);

    ret = req->offset <= base_e && req_e >= base_s;

    return ret;
}

static int rkvenc_free_class_msg(struct rkvenc_task *task)
{
    u32 i;
    u32 reg_class = task->hw_info->reg_class;

    for (i = 0; i < reg_class; i++) {
        kfree(task->reg[i].data);
        /* clear the pointer so a later alloc or free cannot reuse it */
        task->reg[i].data = NULL;
        task->reg[i].size = 0;
    }

    return 0;
}

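/*
 * Lazily allocate the shadow buffer for one register class. base_e is
 * inclusive, hence the extra sizeof(u32): the base window
 * [0x0000, 0x0058] holds 0x58 / 4 + 1 = 23 registers, i.e. 92 bytes.
 */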
static int rkvenc_alloc_class_msg(struct rkvenc_task *task, int class)
{
    u32 *data;
    struct rkvenc_hw_info *hw = task->hw_info;

    if (!task->reg[class].data) {
        u32 base_s = hw->reg_msg[class].base_s;
        u32 base_e = hw->reg_msg[class].base_e;
        u32 class_size = base_e - base_s + sizeof(u32);

        data = kzalloc(class_size, GFP_KERNEL);
        if (!data) {
            return -ENOMEM;
        }
        task->reg[class].data = data;
        task->reg[class].size = class_size;
    }

    return 0;
}

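/*
 * Clip a request to a single class window. For example, a write
 * covering 0x0270..0x02a0 against the picture class window
 * [0x0280, 0x03f4] is narrowed to start at 0x0280, with the user data
 * pointer advanced by the 0x10 bytes that fall outside the window.
 */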
static int rkvenc_update_req(struct rkvenc_task *task, int class, struct mpp_request *req_in,
                             struct mpp_request *req_out)
{
    u32 base_s, base_e, req_e, s, e;
    struct rkvenc_hw_info *hw = task->hw_info;

    base_s = hw->reg_msg[class].base_s;
    base_e = hw->reg_msg[class].base_e;
    req_e = req_in->offset + req_in->size - sizeof(u32);
    s = max(req_in->offset, base_s);
    e = min(req_e, base_e);

    req_out->offset = s;
    req_out->size = e - s + sizeof(u32);
    req_out->data = (u8 *)req_in->data + (s - req_in->offset);

    return 0;
}

static int rkvenc_get_class_msg(struct rkvenc_task *task, u32 addr, struct mpp_request *msg)
{
    int i;
    bool found = false;
    u32 base_s, base_e;
    struct rkvenc_hw_info *hw = task->hw_info;

    if (!msg) {
        return -EINVAL;
    }

    memset(msg, 0, sizeof(*msg));
    for (i = 0; i < hw->reg_class; i++) {
        base_s = hw->reg_msg[i].base_s;
        base_e = hw->reg_msg[i].base_e;
        if (addr >= base_s && addr < base_e) {
            found = true;
            msg->offset = base_s;
            msg->size = task->reg[i].size;
            msg->data = task->reg[i].data;
            break;
        }
    }

    return found ? 0 : -EINVAL;
}

static u32 *rkvenc_get_class_reg(struct rkvenc_task *task, u32 addr)
{
    int i;
    u8 *reg = NULL;
    u32 base_s, base_e;
    struct rkvenc_hw_info *hw = task->hw_info;

    for (i = 0; i < hw->reg_class; i++) {
        base_s = hw->reg_msg[i].base_s;
        base_e = hw->reg_msg[i].base_e;
        if (addr >= base_s && addr < base_e) {
            reg = (u8 *)task->reg[i].data + (addr - base_s);
            break;
        }
    }

    return (u32 *)reg;
}

static int rkvenc2_extract_rcb_info(struct rkvenc2_rcb_info *rcb_inf, struct mpp_request *req)
{
    int max_size = ARRAY_SIZE(rcb_inf->elem);
    int cnt = req->size / sizeof(rcb_inf->elem[0]);

    if (req->size > sizeof(rcb_inf->elem)) {
        mpp_err("count %d, max_size %d\n", cnt, max_size);
        return -EINVAL;
    }
    if (copy_from_user(rcb_inf->elem, req->data, req->size)) {
        mpp_err("copy_from_user failed\n");
        return -EINVAL;
    }
    rcb_inf->cnt = cnt;

    return 0;
}

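/*
 * Split the incoming message array into per-class read/write requests.
 * A single userspace request may span several class windows, in which
 * case it is clipped once per window, so w_req_cnt/r_req_cnt count
 * class-sized fragments rather than original messages.
 */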
static int rkvenc_extract_task_msg(struct mpp_session *session, struct rkvenc_task *task, struct mpp_task_msgs *msgs)
{
    int ret;
    u32 i, j;
    struct mpp_request *req;
    struct rkvenc_hw_info *hw = task->hw_info;

    mpp_debug_enter();

    for (i = 0; i < msgs->req_cnt; i++) {
        req = &msgs->reqs[i];
        if (!req->size) {
            continue;
        }
        switch (req->cmd) {
            case MPP_CMD_SET_REG_WRITE: {
                void *data;
                struct mpp_request *wreq;

                for (j = 0; j < hw->reg_class; j++) {
                    if (!req_over_class(req, task, j)) {
                        continue;
                    }
                    ret = rkvenc_alloc_class_msg(task, j);
                    if (ret) {
                        mpp_err("alloc class msg %d fail.\n", j);
                        goto fail;
                    }
                    wreq = &task->w_reqs[task->w_req_cnt];
                    rkvenc_update_req(task, j, req, wreq);
                    data = rkvenc_get_class_reg(task, wreq->offset);
                    if (!data) {
                        ret = -EINVAL;
                        goto fail;
                    }
                    if (copy_from_user(data, wreq->data, wreq->size)) {
                        mpp_err("copy_from_user fail, offset %08x\n", wreq->offset);
                        ret = -EIO;
                        goto fail;
                    }
                    task->reg[j].valid = 1;
                    task->w_req_cnt++;
                }
                break;
            }
            case MPP_CMD_SET_REG_READ: {
                struct mpp_request *rreq;

                for (j = 0; j < hw->reg_class; j++) {
                    if (!req_over_class(req, task, j)) {
                        continue;
                    }
                    ret = rkvenc_alloc_class_msg(task, j);
                    if (ret) {
                        mpp_err("alloc class msg reg %d fail.\n", j);
                        goto fail;
                    }
                    rreq = &task->r_reqs[task->r_req_cnt];
                    rkvenc_update_req(task, j, req, rreq);
                    task->reg[j].valid = 1;
                    task->r_req_cnt++;
                }
                break;
            }
            case MPP_CMD_SET_REG_ADDR_OFFSET: {
                mpp_extract_reg_offset_info(&task->off_inf, req);
                break;
            }
            case MPP_CMD_SET_RCB_INFO: {
                struct rkvenc2_session_priv *priv = session->priv;

                if (priv) {
                    rkvenc2_extract_rcb_info(&priv->rcb_inf, req);
                }
                break;
            }
            default:
                break;
        }
    }
    mpp_debug(DEBUG_TASK_INFO, "w_req_cnt=%d, r_req_cnt=%d\n", task->w_req_cnt, task->r_req_cnt);

    mpp_debug_leave();
    return 0;

fail:
    rkvenc_free_class_msg(task);

    mpp_debug_leave();
    return ret;
}

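/*
 * Read the encoding format (H.264/H.265) from the shadow copy of the
 * format register described by hw->fmt_reg. For the v2 hardware above
 * (base 0x0300, bitpos 0, bitlen 1) this is simply bit 0 of the
 * register at offset 0x0300 in the picture class.
 */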
static int rkvenc_task_get_format(struct mpp_dev *mpp, struct rkvenc_task *task)
{
    u32 offset, val;
    struct rkvenc_hw_info *hw = task->hw_info;
    u32 class = hw->fmt_reg.class;
    u32 *class_reg = task->reg[class].data;
    u32 class_size = task->reg[class].size;
    u32 class_base = hw->reg_msg[class].base_s;
    u32 bitpos = hw->fmt_reg.bitpos;
    u32 bitlen = hw->fmt_reg.bitlen;

    if (!class_reg || !class_size) {
        return -EINVAL;
    }

    offset = hw->fmt_reg.base - class_base;
    val = class_reg[offset / sizeof(u32)];
    task->fmt = (val >> bitpos) & ((1 << bitlen) - 1);

    return 0;
}

static struct rkvenc_dev *rkvenc_core_balance(struct rkvenc_ccu *ccu)
{
    struct rkvenc_dev *enc;
    struct rkvenc_dev *core = NULL, *n;

    mpp_debug_enter();

    mutex_lock(&ccu->lock);
    enc = list_first_entry(&ccu->core_list, struct rkvenc_dev, core_link);
    list_for_each_entry_safe(core, n, &ccu->core_list, core_link)
    {
        mpp_debug(DEBUG_DEVICE, "%s, disable_work=%d, task_count=%d, task_index=%d\n", dev_name(core->mpp.dev),
                  core->disable_work, atomic_read(&core->mpp.task_count), atomic_read(&core->mpp.task_index));
        /* skip a disabled core (only non-main cores can be disabled) */
        if (core->disable_work) {
            continue;
        }
        /* prefer the core with fewer tasks queued */
        if (atomic_read(&core->mpp.task_count) < atomic_read(&enc->mpp.task_count)) {
            enc = core;
            break;
        }
        /* otherwise prefer the core that has completed fewer tasks */
        if (atomic_read(&core->mpp.task_index) < atomic_read(&enc->mpp.task_index)) {
            enc = core;
        }
    }
    mutex_unlock(&ccu->lock);

    mpp_debug_leave();

    return enc;
}

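/*
 * Patch the task's RCB registers so they point into the reserved SRAM
 * iova window. The buffers are packed back to back from sram_iova; an
 * element that would overrun sram_used is skipped and its register
 * keeps whatever address userspace wrote.
 */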
static int rkvenc2_set_rcbbuf(struct mpp_dev *mpp, struct mpp_session *session, struct rkvenc_task *task)
{
    struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
    struct rkvenc2_session_priv *priv = session->priv;
    u32 sram_enabled = 0;

    mpp_debug_enter();

    if (priv && enc->sram_iova) {
        int i;
        u32 *reg;
        u32 reg_idx, rcb_size, rcb_offset;
        struct rkvenc2_rcb_info *rcb_inf = &priv->rcb_inf;

        rcb_offset = 0;
        for (i = 0; i < rcb_inf->cnt; i++) {
            reg_idx = rcb_inf->elem[i].index;
            rcb_size = rcb_inf->elem[i].size;

            if (rcb_offset > enc->sram_size || (rcb_offset + rcb_size) > enc->sram_used) {
                continue;
            }

            mpp_debug(DEBUG_SRAM_INFO, "rcb: reg %d offset %d, size %d\n", reg_idx, rcb_offset, rcb_size);

            reg = rkvenc_get_class_reg(task, reg_idx * sizeof(u32));
            if (reg) {
                *reg = enc->sram_iova + rcb_offset;
            }

            rcb_offset += rcb_size;
            sram_enabled = 1;
        }
    }
    if (enc->sram_enabled != sram_enabled) {
        mpp_debug(DEBUG_SRAM_INFO, "sram %s\n", sram_enabled ? "enabled" : "disabled");
        enc->sram_enabled = sram_enabled;
    }

    mpp_debug_leave();

    return 0;
}

static void *rkvenc_alloc_task(struct mpp_session *session, struct mpp_task_msgs *msgs)
{
    int ret;
    struct rkvenc_task *task;
    struct mpp_task *mpp_task;
    struct mpp_dev *mpp = session->mpp;

    mpp_debug_enter();

    task = kzalloc(sizeof(*task), GFP_KERNEL);
    if (!task) {
        return NULL;
    }

    mpp_task = &task->mpp_task;
    mpp_task_init(session, mpp_task);
    mpp_task->hw_info = mpp->var->hw_info;
    task->hw_info = to_rkvenc_info(mpp_task->hw_info);
    /* extract reqs for current task */
    ret = rkvenc_extract_task_msg(session, task, msgs);
    if (ret) {
        goto free_task;
    }
    mpp_task->reg = task->reg[0].data;
    /* get format */
    ret = rkvenc_task_get_format(mpp, task);
    if (ret) {
        goto free_task;
    }
    /* process fd in register */
    if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
        u32 i, j;
        int cnt;
        u32 off;
        const u16 *tbl;
        struct rkvenc_hw_info *hw = task->hw_info;

        for (i = 0; i < hw->fd_class; i++) {
            u32 class = hw->fd_reg[i].class;
            u32 fmt = hw->fd_reg[i].base_fmt + task->fmt;
            u32 *reg = task->reg[class].data;
            u32 ss = hw->reg_msg[class].base_s / sizeof(u32);

            if (!reg) {
                continue;
            }

            ret = mpp_translate_reg_address(session, mpp_task, fmt, reg, NULL);
            if (ret) {
                goto fail;
            }

            cnt = mpp->var->trans_info[fmt].count;
            tbl = mpp->var->trans_info[fmt].table;
            for (j = 0; j < cnt; j++) {
                off = mpp_query_reg_offset_info(&task->off_inf, tbl[j] + ss);
                mpp_debug(DEBUG_IOMMU, "reg[%d] + offset %d\n", tbl[j] + ss, off);
                reg[tbl[j]] += off;
            }
        }
    }
    rkvenc2_set_rcbbuf(mpp, session, task);
    task->clk_mode = CLK_MODE_NORMAL;

    mpp_debug_leave();

    return mpp_task;

fail:
    mpp_task_dump_mem_region(mpp, mpp_task);
    mpp_task_dump_reg(mpp, mpp_task);
    mpp_task_finalize(session, mpp_task);
free_task:
    /* free class register buffers (safe to call twice, pointers are cleared) */
    rkvenc_free_class_msg(task);
    kfree(task);

    return NULL;
}

static void *rkvenc_ccu_alloc_task(struct mpp_session *session, struct mpp_task_msgs *msgs)
{
    struct rkvenc_dev *enc = to_rkvenc_dev(session->mpp);

    /* if multi-core, choose one core for the current task */
    if (enc->ccu) {
        enc = rkvenc_core_balance(enc->ccu);
        session->mpp = &enc->mpp;
    }
    return rkvenc_alloc_task(session, msgs);
}

static void *rkvenc2_prepare(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
    struct mpp_taskqueue *queue = mpp->queue;
    unsigned long flags;
    s32 core_id;

    spin_lock_irqsave(&queue->running_lock, flags);
    core_id = find_first_bit(&queue->core_idle, queue->core_count);
    if (core_id >= queue->core_count) {
        mpp_task = NULL;
        mpp_dbg_core("core %d all busy %lx\n", core_id, queue->core_idle);
    } else {
        mpp_dbg_core("core %d set idle %lx\n", core_id, queue->core_idle);

        clear_bit(core_id, &queue->core_idle);
        mpp_task->mpp = queue->cores[core_id];
        mpp_task->core_id = core_id;
    }
    spin_unlock_irqrestore(&queue->running_lock, flags);

    return mpp_task;
}

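/*
 * Replay the buffered register writes to the hardware. The write to
 * enc_start_base is held back and issued last, after a TLB flush and a
 * write barrier, so the encoder only starts once every other register
 * has landed.
 */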
static int rkvenc_run(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
    u32 i, j;
    u32 start_val = 0;
    struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
    struct rkvenc_task *task = to_rkvenc_task(mpp_task);

    mpp_debug_enter();

    /* clear hardware counter */
    mpp_write_relaxed(mpp, 0x5300, 0x2);

    for (i = 0; i < task->w_req_cnt; i++) {
        int ret;
        u32 s, e, off;
        u32 *regs;
        struct mpp_request msg;
        struct mpp_request *req = &task->w_reqs[i];

        ret = rkvenc_get_class_msg(task, req->offset, &msg);
        if (ret) {
            return -EINVAL;
        }

        s = (req->offset - msg.offset) / sizeof(u32);
        e = s + req->size / sizeof(u32);
        regs = (u32 *)msg.data;
        for (j = s; j < e; j++) {
            off = msg.offset + j * sizeof(u32);
            /* hold the start register back, it is written last */
            if (off == enc->hw_info->enc_start_base) {
                start_val = regs[j];
                continue;
            }
            mpp_write_relaxed(mpp, off, regs[j]);
        }
    }

    if (mpp_debug_unlikely(DEBUG_CORE)) {
        dev_info(mpp->dev, "reg[%03x] %08x\n", 0x304, mpp_read_relaxed(mpp, 0x304));
    }

    /* flush tlb before starting hardware */
    mpp_iommu_flush_tlb(mpp->iommu_info);

    /* init current task */
    mpp->cur_task = mpp_task;

    /* make sure all register writes have landed before starting the device */
    wmb();
    mpp_write(mpp, enc->hw_info->enc_start_base, start_val);

    mpp_debug_leave();

    return 0;
}

static int rkvenc_irq(struct mpp_dev *mpp)
{
    struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
    struct rkvenc_hw_info *hw = enc->hw_info;

    mpp_debug_enter();

    mpp->irq_status = mpp_read(mpp, hw->int_sta_base);
    if (!mpp->irq_status) {
        return IRQ_NONE;
    }

    mpp_write(mpp, hw->int_mask_base, 0x100);
    mpp_write(mpp, hw->int_clr_base, 0xffffffff);
    udelay(5);
    mpp_write(mpp, hw->int_sta_base, 0);

    mpp_debug_leave();

    return IRQ_WAKE_THREAD;
}

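/*
 * Threaded bottom half, paired with rkvenc_irq() above: the hard irq
 * handler latches int_sta and returns IRQ_WAKE_THREAD, and this handler
 * finishes the current task and marks the core idle again.
 */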
static int rkvenc_isr(struct mpp_dev *mpp)
{
    struct rkvenc_task *task;
    struct mpp_task *mpp_task;
    struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
    struct mpp_taskqueue *queue = mpp->queue;

    mpp_debug_enter();

    /* use a spin lock here */
    if (!mpp->cur_task) {
        dev_err(mpp->dev, "no current task\n");
        return IRQ_HANDLED;
    }

    mpp_task = mpp->cur_task;
    mpp_time_diff(mpp_task);
    mpp->cur_task = NULL;

    if (mpp_task->mpp && mpp_task->mpp != mpp) {
        dev_err(mpp->dev, "mismatch core dev %p:%p\n", mpp_task->mpp, mpp);
    }

    task = to_rkvenc_task(mpp_task);
    task->irq_status = mpp->irq_status;

    mpp_debug(DEBUG_IRQ_STATUS, "%s irq_status: %08x\n", dev_name(mpp->dev), task->irq_status);

    if (task->irq_status & enc->hw_info->err_mask) {
        atomic_inc(&mpp->reset_request);
        /* dump register */
        if (mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) {
            mpp_task_dump_hw_reg(mpp, mpp_task);
        }
    }
    mpp_task_finish(mpp_task->session, mpp_task);

    set_bit(mpp->core_id, &queue->core_idle);
    mpp_dbg_core("core %d isr idle %lx\n", mpp->core_id, queue->core_idle);

    mpp_debug_leave();

    return IRQ_HANDLED;
}

static int rkvenc_finish(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
    u32 i, j;
    u32 *reg;
    struct rkvenc_task *task = to_rkvenc_task(mpp_task);

    mpp_debug_enter();

    for (i = 0; i < task->r_req_cnt; i++) {
        int ret;
        int s, e;
        struct mpp_request msg;
        struct mpp_request *req = &task->r_reqs[i];

        ret = rkvenc_get_class_msg(task, req->offset, &msg);
        if (ret) {
            return -EINVAL;
        }
        s = (req->offset - msg.offset) / sizeof(u32);
        e = s + req->size / sizeof(u32);
        reg = (u32 *)msg.data;
        for (j = s; j < e; j++) {
            reg[j] = mpp_read_relaxed(mpp, msg.offset + j * sizeof(u32));
        }
    }
    /* revert hack for irq status */
    reg = rkvenc_get_class_reg(task, task->hw_info->int_sta_base);
    if (reg) {
        *reg = task->irq_status;
    }

    mpp_debug_leave();

    return 0;
}

static int rkvenc_result(struct mpp_dev *mpp, struct mpp_task *mpp_task, struct mpp_task_msgs *msgs)
{
    u32 i;
    struct rkvenc_task *task = to_rkvenc_task(mpp_task);

    mpp_debug_enter();

    for (i = 0; i < task->r_req_cnt; i++) {
        struct mpp_request *req = &task->r_reqs[i];
        u32 *reg = rkvenc_get_class_reg(task, req->offset);

        if (!reg) {
            return -EINVAL;
        }
        if (copy_to_user(req->data, reg, req->size)) {
            mpp_err("copy_to_user reg fail\n");
            return -EIO;
        }
    }

    mpp_debug_leave();

    return 0;
}

static int rkvenc_free_task(struct mpp_session *session, struct mpp_task *mpp_task)
{
    struct rkvenc_task *task = to_rkvenc_task(mpp_task);

    mpp_task_finalize(session, mpp_task);
    rkvenc_free_class_msg(task);
    kfree(task);

    return 0;
}

static int rkvenc_control(struct mpp_session *session, struct mpp_request *req)
{
    switch (req->cmd) {
        case MPP_CMD_SEND_CODEC_INFO: {
            int i;
            int cnt;
            struct codec_info_elem elem;
            struct rkvenc2_session_priv *priv;

            if (!session || !session->priv) {
                mpp_err("session info null\n");
                return -EINVAL;
            }
            priv = session->priv;

            cnt = req->size / sizeof(elem);
            cnt = (cnt > ENC_INFO_BUTT) ? ENC_INFO_BUTT : cnt;
            mpp_debug(DEBUG_IOCTL, "codec info count %d\n", cnt);
            for (i = 0; i < cnt; i++) {
                if (copy_from_user(&elem, req->data + i * sizeof(elem), sizeof(elem))) {
                    mpp_err("copy_from_user failed\n");
                    continue;
                }
                if (elem.type > ENC_INFO_BASE && elem.type < ENC_INFO_BUTT && elem.flag > CODEC_INFO_FLAG_NULL &&
                    elem.flag < CODEC_INFO_FLAG_BUTT) {
                    elem.type = array_index_nospec(elem.type, ENC_INFO_BUTT);
                    priv->codec_info[elem.type].flag = elem.flag;
                    priv->codec_info[elem.type].val = elem.data;
                } else {
                    mpp_err("codec info invalid, type %d, flag %d\n", elem.type, elem.flag);
                }
            }
            break;
        }
        default: {
            mpp_err("unknown mpp ioctl cmd %x\n", req->cmd);
            break;
        }
    }
    return 0;
}

static int rkvenc_free_session(struct mpp_session *session)
{
    if (session && session->priv) {
        kfree(session->priv);
        session->priv = NULL;
    }
    return 0;
}

static int rkvenc_init_session(struct mpp_session *session)
{
    struct rkvenc2_session_priv *priv;

    if (!session) {
        mpp_err("session is null\n");
        return -EINVAL;
    }

    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
    if (!priv) {
        return -ENOMEM;
    }

    init_rwsem(&priv->rw_sem);
    session->priv = priv;

    return 0;
}

#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
static int rkvenc_procfs_remove(struct mpp_dev *mpp)
{
    struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

    if (enc->procfs) {
        proc_remove(enc->procfs);
        enc->procfs = NULL;
    }

    return 0;
}

static int rkvenc_dump_session(struct mpp_session *session, struct seq_file *seq)
{
    int i;
    struct rkvenc2_session_priv *priv = session->priv;

    down_read(&priv->rw_sem);
    /* item name */
    seq_puts(seq, "------------------------------------------------------");
    seq_puts(seq, "------------------------------------------------------\n");
    seq_printf(seq, "|%8s|", (const char *)"session");
    seq_printf(seq, "%8s|", (const char *)"device");
    for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) {
        bool show = priv->codec_info[i].flag;

        if (show) {
            seq_printf(seq, "%8s|", enc_info_item_name[i]);
        }
    }
    seq_puts(seq, "\n");
    /* item data */
    seq_printf(seq, "|%8p|", session);
    seq_printf(seq, "%8s|", mpp_device_name[session->device_type]);
    for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) {
        u32 flag = priv->codec_info[i].flag;

        if (!flag) {
            continue;
        }
        if (flag == CODEC_INFO_FLAG_NUMBER) {
            u32 data = priv->codec_info[i].val;

            seq_printf(seq, "%8d|", data);
        } else if (flag == CODEC_INFO_FLAG_STRING) {
            const char *name = (const char *)&priv->codec_info[i].val;

            seq_printf(seq, "%8s|", name);
        } else {
            seq_printf(seq, "%8s|", (const char *)"null");
        }
    }
    seq_puts(seq, "\n");
    up_read(&priv->rw_sem);

    return 0;
}

static int rkvenc_show_session_info(struct seq_file *seq, void *offset)
{
    struct mpp_session *session = NULL, *n;
    struct mpp_dev *mpp = seq->private;

    mutex_lock(&mpp->srv->session_lock);
    list_for_each_entry_safe(session, n, &mpp->srv->session_list, session_link)
    {
        if (session->device_type != MPP_DEVICE_RKVENC) {
            continue;
        }
        if (!session->priv) {
            continue;
        }
        if (mpp->dev_ops->dump_session) {
            mpp->dev_ops->dump_session(session, seq);
        }
    }
    mutex_unlock(&mpp->srv->session_lock);

    return 0;
}

static int rkvenc_procfs_init(struct mpp_dev *mpp)
{
    struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
    char name[32];

    if (!mpp->dev || !mpp->dev->of_node || !mpp->dev->of_node->name || !mpp->srv || !mpp->srv->procfs) {
        return -EINVAL;
    }

    snprintf(name, sizeof(name) - 1, "%s%d", mpp->dev->of_node->name, mpp->core_id);

    enc->procfs = proc_mkdir(name, mpp->srv->procfs);
    if (IS_ERR_OR_NULL(enc->procfs)) {
        mpp_err("failed to open procfs\n");
        enc->procfs = NULL;
        return -EIO;
    }
    /* for debug */
    mpp_procfs_create_u32("aclk", RKVENC_MPP_PROCF, enc->procfs, &enc->aclk_info.debug_rate_hz);
    mpp_procfs_create_u32("clk_core", RKVENC_MPP_PROCF, enc->procfs, &enc->core_clk_info.debug_rate_hz);
    mpp_procfs_create_u32("session_buffers", RKVENC_MPP_PROCF, enc->procfs, &mpp->session_max_buffers);
    /* for show session info */
    proc_create_single_data("sessions-info", RKVENC_SINGLE, enc->procfs, rkvenc_show_session_info, mpp);

    return 0;
}

static int rkvenc_procfs_ccu_init(struct mpp_dev *mpp)
{
    struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

    if (!enc->procfs) {
        goto done;
    }

    mpp_procfs_create_u32("disable_work", RKVENC_MPP_PROCF, enc->procfs, &enc->disable_work);
done:
    return 0;
}
#else
static inline int rkvenc_procfs_remove(struct mpp_dev *mpp)
{
    return 0;
}

static inline int rkvenc_procfs_init(struct mpp_dev *mpp)
{
    return 0;
}

static inline int rkvenc_procfs_ccu_init(struct mpp_dev *mpp)
{
    return 0;
}
#endif

static int rkvenc_init(struct mpp_dev *mpp)
{
    struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
    int ret = 0;

    mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_RKVENC];

    /* Get clock info from dtsi */
    ret = mpp_get_clk_info(mpp, &enc->aclk_info, "aclk_vcodec");
    if (ret) {
        mpp_err("failed on clk_get aclk_vcodec\n");
    }
    ret = mpp_get_clk_info(mpp, &enc->hclk_info, "hclk_vcodec");
    if (ret) {
        mpp_err("failed on clk_get hclk_vcodec\n");
    }
    ret = mpp_get_clk_info(mpp, &enc->core_clk_info, "clk_core");
    if (ret) {
        mpp_err("failed on clk_get clk_core\n");
    }
    /* Get normal max workload from dtsi */
    of_property_read_u32(mpp->dev->of_node, "rockchip,default-max-load", &enc->default_max_load);
    /* Set default rates */
    mpp_set_clk_info_rate_hz(&enc->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);
    mpp_set_clk_info_rate_hz(&enc->core_clk_info, CLK_MODE_DEFAULT, 600 * MHZ);

    /* Get reset control from dtsi */
    enc->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
    if (!enc->rst_a) {
        mpp_err("No aclk reset resource defined\n");
    }
    enc->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
    if (!enc->rst_h) {
        mpp_err("No hclk reset resource defined\n");
    }
    enc->rst_core = mpp_reset_control_get(mpp, RST_TYPE_CORE, "video_core");
    if (!enc->rst_core) {
        mpp_err("No core reset resource defined\n");
    }

    return 0;
}

static int rkvenc_reset(struct mpp_dev *mpp)
{
    struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
    struct rkvenc_hw_info *hw = enc->hw_info;
    struct mpp_taskqueue *queue = mpp->queue;

    mpp_debug_enter();

    /* safe reset */
    mpp_write(mpp, hw->int_mask_base, 0x3FF);
    mpp_write(mpp, hw->enc_clr_base, 0x1);
    udelay(5);
    mpp_write(mpp, hw->int_clr_base, 0xffffffff);
    mpp_write(mpp, hw->int_sta_base, 0);

    /* cru reset */
    if (enc->rst_a && enc->rst_h && enc->rst_core) {
        rockchip_pmu_idle_request(mpp->dev, true);
        mpp_safe_reset(enc->rst_a);
        mpp_safe_reset(enc->rst_h);
        mpp_safe_reset(enc->rst_core);
        udelay(5);
        mpp_safe_unreset(enc->rst_a);
        mpp_safe_unreset(enc->rst_h);
        mpp_safe_unreset(enc->rst_core);
        rockchip_pmu_idle_request(mpp->dev, false);
    }

    set_bit(mpp->core_id, &queue->core_idle);
    mpp_dbg_core("core %d reset idle %lx\n", mpp->core_id, queue->core_idle);

    mpp_debug_leave();

    return 0;
}

static int rkvenc_clk_on(struct mpp_dev *mpp)
{
    struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

    mpp_clk_safe_enable(enc->aclk_info.clk);
    mpp_clk_safe_enable(enc->hclk_info.clk);
    mpp_clk_safe_enable(enc->core_clk_info.clk);

    return 0;
}

static int rkvenc_clk_off(struct mpp_dev *mpp)
{
    struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

    clk_disable_unprepare(enc->aclk_info.clk);
    clk_disable_unprepare(enc->hclk_info.clk);
    clk_disable_unprepare(enc->core_clk_info.clk);

    return 0;
}

static int rkvenc_set_freq(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
    struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
    struct rkvenc_task *task = to_rkvenc_task(mpp_task);

    mpp_clk_set_rate(&enc->aclk_info, task->clk_mode);
    mpp_clk_set_rate(&enc->core_clk_info, task->clk_mode);

    return 0;
}

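/*
 * The single-core and ccu op tables below differ only in task dispatch:
 * the ccu variant balances alloc_task across cores and adds a prepare()
 * hook that claims an idle core from the queue bitmap.
 */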
static struct mpp_hw_ops rkvenc_hw_ops = {
    .init = rkvenc_init,
    .clk_on = rkvenc_clk_on,
    .clk_off = rkvenc_clk_off,
    .set_freq = rkvenc_set_freq,
    .reset = rkvenc_reset,
};

static struct mpp_dev_ops rkvenc_dev_ops_v2 = {
    .alloc_task = rkvenc_alloc_task,
    .run = rkvenc_run,
    .irq = rkvenc_irq,
    .isr = rkvenc_isr,
    .finish = rkvenc_finish,
    .result = rkvenc_result,
    .free_task = rkvenc_free_task,
    .ioctl = rkvenc_control,
    .init_session = rkvenc_init_session,
    .free_session = rkvenc_free_session,
    .dump_session = rkvenc_dump_session,
};

static struct mpp_dev_ops rkvenc_ccu_dev_ops = {
    .alloc_task = rkvenc_ccu_alloc_task,
    .prepare = rkvenc2_prepare,
    .run = rkvenc_run,
    .irq = rkvenc_irq,
    .isr = rkvenc_isr,
    .finish = rkvenc_finish,
    .result = rkvenc_result,
    .free_task = rkvenc_free_task,
    .ioctl = rkvenc_control,
    .init_session = rkvenc_init_session,
    .free_session = rkvenc_free_session,
    .dump_session = rkvenc_dump_session,
};

static const struct mpp_dev_var rkvenc_v2_data = {
    .device_type = MPP_DEVICE_RKVENC,
    .hw_info = &rkvenc_v2_hw_info.hw,
    .trans_info = trans_rkvenc_v2,
    .hw_ops = &rkvenc_hw_ops,
    .dev_ops = &rkvenc_dev_ops_v2,
};

static const struct mpp_dev_var rkvenc_ccu_data = {
    .device_type = MPP_DEVICE_RKVENC,
    .hw_info = &rkvenc_v2_hw_info.hw,
    .trans_info = trans_rkvenc_v2,
    .hw_ops = &rkvenc_hw_ops,
    .dev_ops = &rkvenc_ccu_dev_ops,
};

static const struct of_device_id mpp_rkvenc_dt_match[] = {
    {
        .compatible = "rockchip,rkv-encoder-v2",
        .data = &rkvenc_v2_data,
    },
#ifdef CONFIG_CPU_RK3588
    {
        .compatible = "rockchip,rkv-encoder-v2-core",
        .data = &rkvenc_ccu_data,
    },
    {
        .compatible = "rockchip,rkv-encoder-v2-ccu",
    },
#endif
    {},
};

static int rkvenc_ccu_probe(struct platform_device *pdev)
{
    struct rkvenc_ccu *ccu;
    struct device *dev = &pdev->dev;

    ccu = devm_kzalloc(dev, sizeof(*ccu), GFP_KERNEL);
    if (!ccu) {
        return -ENOMEM;
    }

    platform_set_drvdata(pdev, ccu);

    mutex_init(&ccu->lock);
    INIT_LIST_HEAD(&ccu->core_list);

    return 0;
}

static int rkvenc_attach_ccu(struct device *dev, struct rkvenc_dev *enc)
{
    struct device_node *np;
    struct platform_device *pdev;
    struct rkvenc_ccu *ccu;

    mpp_debug_enter();

    np = of_parse_phandle(dev->of_node, "rockchip,ccu", 0);
    if (!np || !of_device_is_available(np)) {
        return -ENODEV;
    }

    pdev = of_find_device_by_node(np);
    of_node_put(np);
    if (!pdev) {
        return -ENODEV;
    }

    ccu = platform_get_drvdata(pdev);
    if (!ccu) {
        return -ENOMEM;
    }

    INIT_LIST_HEAD(&enc->core_link);
    mutex_lock(&ccu->lock);
    ccu->core_num++;
    list_add_tail(&enc->core_link, &ccu->core_list);
    mutex_unlock(&ccu->lock);

    /* attach the ccu-domain to current core */
    if (!ccu->main_core) {
        /*
         * the first attached device becomes the main-core;
         * its iommu domain serves as the shared ccu-domain
         */
        ccu->main_core = &enc->mpp;
    } else {
        struct mpp_iommu_info *ccu_info, *cur_info;

        /* set the ccu-domain for current device */
        ccu_info = ccu->main_core->iommu_info;
        cur_info = enc->mpp.iommu_info;

        cur_info->domain = ccu_info->domain;
        cur_info->rw_sem = ccu_info->rw_sem;
        mpp_iommu_attach(cur_info);
    }
    enc->ccu = ccu;

    dev_info(dev, "attach ccu as core %d\n", enc->mpp.core_id);
    mpp_debug_leave();

    return 0;
}

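/*
 * Reserve a fixed iova window for RCB data and back it with on-chip
 * SRAM, falling back to normal pages for whatever part of the window
 * the SRAM cannot cover:
 *
 *   iova: [sram_iova ........................... sram_iova + sram_used)
 *   phys: [SRAM, sram_size bytes][alloc_pages(), sram_used - sram_size]
 */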
static int rkvenc2_alloc_rcbbuf(struct platform_device *pdev, struct rkvenc_dev *enc)
{
    int ret;
    u32 vals[2];
    dma_addr_t iova;
    u32 sram_used, sram_size;
    struct device_node *sram_np;
    struct resource sram_res;
    resource_size_t sram_start, sram_end;
    struct iommu_domain *domain;
    struct device *dev = &pdev->dev;

    /* get rcb iova start and size */
    ret = device_property_read_u32_array(dev, "rockchip,rcb-iova", vals, 2);
    if (ret) {
        return ret;
    }

    iova = PAGE_ALIGN(vals[0]);
    sram_used = PAGE_ALIGN(vals[1]);
    if (!sram_used) {
        dev_err(dev, "sram rcb invalid.\n");
        return -EINVAL;
    }
    /* alloc reserve iova for rcb */
    ret = iommu_dma_reserve_iova(dev, iova, sram_used);
    if (ret) {
        dev_err(dev, "alloc rcb iova error.\n");
        return ret;
    }
    /* get sram device node */
    sram_np = of_parse_phandle(dev->of_node, "rockchip,sram", 0);
    if (!sram_np) {
        dev_err(dev, "could not find phandle sram\n");
        return -ENODEV;
    }
    /* get sram start and size */
    ret = of_address_to_resource(sram_np, 0, &sram_res);
    of_node_put(sram_np);
    if (ret) {
        dev_err(dev, "find sram res error\n");
        return ret;
    }
    /* check that sram start and size are PAGE_SIZE aligned */
    sram_start = round_up(sram_res.start, PAGE_SIZE);
    sram_end = round_down(sram_res.start + resource_size(&sram_res), PAGE_SIZE);
    if (sram_end <= sram_start) {
        dev_err(dev, "no available sram, phy_start %pa, phy_end %pa\n", &sram_start, &sram_end);
        return -ENOMEM;
    }
    sram_size = sram_end - sram_start;
    sram_size = sram_used < sram_size ? sram_used : sram_size;
    /* iova map to sram */
    domain = enc->mpp.iommu_info->domain;
    ret = iommu_map(domain, iova, sram_start, sram_size, IOMMU_READ | IOMMU_WRITE);
    if (ret) {
        dev_err(dev, "sram iommu_map error.\n");
        return ret;
    }
    /* alloc dma for the remaining buffer, sram + dma */
    if (sram_size < sram_used) {
        struct page *page;
        size_t page_size = PAGE_ALIGN(sram_used - sram_size);

        page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(page_size));
        if (!page) {
            dev_err(dev, "unable to allocate pages\n");
            ret = -ENOMEM;
            goto err_sram_map;
        }
        /* iova map to dma */
        ret = iommu_map(domain, iova + sram_size, page_to_phys(page), page_size, IOMMU_READ | IOMMU_WRITE);
        if (ret) {
            dev_err(dev, "page iommu_map error.\n");
            __free_pages(page, get_order(page_size));
            goto err_sram_map;
        }
        enc->rcb_page = page;
    }

    enc->sram_size = sram_size;
    enc->sram_used = sram_used;
    enc->sram_iova = iova;
    enc->sram_enabled = -1;
    dev_info(dev, "sram_start %pa\n", &sram_start);
    dev_info(dev, "sram_iova %pad\n", &enc->sram_iova);
    dev_info(dev, "sram_size %u\n", enc->sram_size);
    dev_info(dev, "sram_used %u\n", enc->sram_used);

    return 0;

err_sram_map:
    iommu_unmap(domain, iova, sram_size);

    return ret;
}

static int rkvenc_core_probe(struct platform_device *pdev)
{
    int ret = 0;
    struct device *dev = &pdev->dev;
    struct rkvenc_dev *enc = NULL;
    struct mpp_dev *mpp = NULL;

    enc = devm_kzalloc(dev, sizeof(*enc), GFP_KERNEL);
    if (!enc) {
        return -ENOMEM;
    }

    mpp = &enc->mpp;
    platform_set_drvdata(pdev, enc);

    if (pdev->dev.of_node) {
        struct device_node *np = pdev->dev.of_node;
        const struct of_device_id *match = NULL;

        match = of_match_node(mpp_rkvenc_dt_match, np);
        if (match) {
            mpp->var = (struct mpp_dev_var *)match->data;
        }

        mpp->core_id = of_alias_get_id(np, "rkvenc");
    }

    ret = mpp_dev_probe(mpp, pdev);
    if (ret) {
        return ret;
    }

    rkvenc2_alloc_rcbbuf(pdev, enc);

    /* attach core to ccu */
    ret = rkvenc_attach_ccu(dev, enc);
    if (ret) {
        dev_err(dev, "attach ccu failed\n");
        return ret;
    }

    ret = devm_request_threaded_irq(dev, mpp->irq, mpp_dev_irq, mpp_dev_isr_sched, IRQF_SHARED, dev_name(dev), mpp);
    if (ret) {
        dev_err(dev, "failed to register interrupt handler\n");
        return -EINVAL;
    }
    mpp->session_max_buffers = RKVENC_SESSION_MAX_BUFFERS;
    enc->hw_info = to_rkvenc_info(mpp->var->hw_info);
    rkvenc_procfs_init(mpp);
    rkvenc_procfs_ccu_init(mpp);

    /* if current is the main-core, register the device with the mpp service */
    if (mpp == enc->ccu->main_core) {
        mpp_dev_register_srv(mpp, mpp->srv);
    }

    return 0;
}

static int rkvenc_probe_default(struct platform_device *pdev)
{
    int ret = 0;
    struct device *dev = &pdev->dev;
    struct rkvenc_dev *enc = NULL;
    struct mpp_dev *mpp = NULL;
    const struct of_device_id *match = NULL;

    enc = devm_kzalloc(dev, sizeof(*enc), GFP_KERNEL);
    if (!enc) {
        return -ENOMEM;
    }

    mpp = &enc->mpp;
    platform_set_drvdata(pdev, enc);

    if (pdev->dev.of_node) {
        match = of_match_node(mpp_rkvenc_dt_match, pdev->dev.of_node);
        if (match) {
            mpp->var = (struct mpp_dev_var *)match->data;
        }
    }

    ret = mpp_dev_probe(mpp, pdev);
    if (ret) {
        return ret;
    }

    rkvenc2_alloc_rcbbuf(pdev, enc);

    ret = devm_request_threaded_irq(dev, mpp->irq, mpp_dev_irq, mpp_dev_isr_sched, IRQF_SHARED, dev_name(dev), mpp);
    if (ret) {
        dev_err(dev, "failed to register interrupt handler\n");
        goto failed_get_irq;
    }
    mpp->session_max_buffers = RKVENC_SESSION_MAX_BUFFERS;
    enc->hw_info = to_rkvenc_info(mpp->var->hw_info);
    rkvenc_procfs_init(mpp);
    mpp_dev_register_srv(mpp, mpp->srv);

    return 0;

failed_get_irq:
    mpp_dev_remove(mpp);

    return ret;
}

static int rkvenc_probe(struct platform_device *pdev)
{
    int ret = 0;
    struct device *dev = &pdev->dev;
    struct device_node *np = dev->of_node;

    dev_info(dev, "probing start\n");

    if (strstr(np->name, "ccu")) {
        ret = rkvenc_ccu_probe(pdev);
    } else if (strstr(np->name, "core")) {
        ret = rkvenc_core_probe(pdev);
    } else {
        ret = rkvenc_probe_default(pdev);
    }

    dev_info(dev, "probing finish\n");

    return ret;
}

static int rkvenc2_free_rcbbuf(struct platform_device *pdev, struct rkvenc_dev *enc)
{
    struct iommu_domain *domain;

    if (enc->rcb_page) {
        size_t page_size = PAGE_ALIGN(enc->sram_used - enc->sram_size);

        __free_pages(enc->rcb_page, get_order(page_size));
    }
    if (enc->sram_iova) {
        domain = enc->mpp.iommu_info->domain;
        iommu_unmap(domain, enc->sram_iova, enc->sram_used);
    }

    return 0;
}

static int rkvenc_remove(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct device_node *np = dev->of_node;

    if (strstr(np->name, "ccu")) {
        dev_info(dev, "remove ccu\n");
    } else if (strstr(np->name, "core")) {
        struct rkvenc_dev *enc = platform_get_drvdata(pdev);

        dev_info(dev, "remove core\n");
        if (enc->ccu) {
            mutex_lock(&enc->ccu->lock);
            list_del_init(&enc->core_link);
            enc->ccu->core_num--;
            mutex_unlock(&enc->ccu->lock);
        }
        rkvenc2_free_rcbbuf(pdev, enc);
        mpp_dev_remove(&enc->mpp);
        rkvenc_procfs_remove(&enc->mpp);
    } else {
        struct rkvenc_dev *enc = platform_get_drvdata(pdev);

        dev_info(dev, "remove device\n");
        rkvenc2_free_rcbbuf(pdev, enc);
        mpp_dev_remove(&enc->mpp);
        rkvenc_procfs_remove(&enc->mpp);
    }

    return 0;
}

static void rkvenc_shutdown(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;

    if (!strstr(dev_name(dev), "ccu")) {
        int ret;
        int val;
        struct rkvenc_dev *enc = platform_get_drvdata(pdev);
        struct mpp_dev *mpp = &enc->mpp;

        dev_info(dev, "shutdown device\n");

        if (mpp->srv) {
            atomic_inc(&mpp->srv->shutdown_request);
        }

        /* poll task_count every 1 ms, give up after 200 ms */
        ret = readx_poll_timeout(atomic_read, &mpp->task_count, val, val == 0, 1000, 200000);
        if (ret == -ETIMEDOUT) {
            dev_err(dev, "timed out waiting for running tasks\n");
        }
    }
    dev_info(dev, "shutdown success\n");
}

struct platform_driver rockchip_rkvenc2_driver = {
    .probe = rkvenc_probe,
    .remove = rkvenc_remove,
    .shutdown = rkvenc_shutdown,
    .driver =
        {
            .name = RKVENC_DRIVER_NAME,
            .of_match_table = of_match_ptr(mpp_rkvenc_dt_match),
        },
};