1 /* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
2 /*
3 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
4 *
5 * author:
6 * Alpha Lin, alpha.lin@rock-chips.com
7 * Randy Li, randy.li@rock-chips.com
8 * Ding Wei, leo.ding@rock-chips.com
9 *
10 */
11 #ifndef __ROCKCHIP_MPP_COMMON_H__
12 #define __ROCKCHIP_MPP_COMMON_H__
13
14 #include <linux/cdev.h>
15 #include <linux/clk.h>
16 #include <linux/dma-buf.h>
17 #include <linux/kfifo.h>
18 #include <linux/types.h>
19 #include <linux/time.h>
20 #include <linux/workqueue.h>
21 #include <linux/kthread.h>
22 #include <linux/reset.h>
23 #include <linux/irqreturn.h>
24 #include <linux/poll.h>
25 #include <linux/platform_device.h>
26 #include <soc/rockchip/pm_domains.h>
27
/* frequency helper: clock rates in this driver are expressed in Hz */
#define MHZ (1000 * 1000)

/* max mpp_request entries carried by one ioctl message batch */
#define MPP_MAX_MSG_NUM (16)
/* max register-index entries in a translate table / offset table */
#define MPP_MAX_REG_TRANS_NUM (60)
/* max tasks a single hardware queue may hold */
#define MPP_MAX_TASK_CAPACITY (16)
/* define flags for mpp_request */
#define MPP_FLAGS_MULTI_MSG (0x00000001)
#define MPP_FLAGS_LAST_MSG (0x00000002)
#define MPP_FLAGS_REG_FD_NO_TRANS (0x00000004)
#define MPP_FLAGS_SCL_FD_NO_TRANS (0x00000008)
#define MPP_FLAGS_REG_NO_OFFSET (0x00000010)
#define MPP_FLAGS_SECURE_MODE (0x00010000)

/* grf mask for get value (low 16 bits of a write-mask grf register) */
#define MPP_GRF_VAL_MASK (0xFFFF)

/* max 4 cores supported */
#define MPP_MAX_CORE_NUM (4)
46
/**
 * Device type: classified by hardware feature
 *
 * Each value is a bit position; the hex comment beside each entry is the
 * corresponding mask (1 << value) as used in bitmask fields such as
 * mpp_service::hw_support.
 */
enum MPP_DEVICE_TYPE {
	MPP_DEVICE_VDPU1 = 0, /* 0x00000001 */
	MPP_DEVICE_VDPU2 = 1, /* 0x00000002 */
	MPP_DEVICE_VDPU1_PP = 2, /* 0x00000004 */
	MPP_DEVICE_VDPU2_PP = 3, /* 0x00000008 */
	MPP_DEVICE_AV1DEC = 4, /* 0x00000010 */

	MPP_DEVICE_HEVC_DEC = 8, /* 0x00000100 */
	MPP_DEVICE_RKVDEC = 9, /* 0x00000200 */
	MPP_DEVICE_AVSPLUS_DEC = 12, /* 0x00001000 */
	MPP_DEVICE_JPGDEC = 13, /* 0x00002000 */

	MPP_DEVICE_RKVENC = 16, /* 0x00010000 */
	MPP_DEVICE_VEPU1 = 17, /* 0x00020000 */
	MPP_DEVICE_VEPU2 = 18, /* 0x00040000 */
	MPP_DEVICE_VEPU22 = 24, /* 0x01000000 */

	MPP_DEVICE_IEP2 = 28, /* 0x10000000 */
	MPP_DEVICE_BUTT, /* sentinel: number of device type slots */
};
70
/**
 * Driver type: classified by driver
 *
 * Indexes per-driver resources such as mpp_service::grf_infos and
 * mpp_service::sub_drivers.
 */
enum MPP_DRIVER_TYPE {
	MPP_DRIVER_NULL = 0,
	MPP_DRIVER_VDPU1,
	MPP_DRIVER_VEPU1,
	MPP_DRIVER_VDPU2,
	MPP_DRIVER_VEPU2,
	MPP_DRIVER_VEPU22,
	MPP_DRIVER_RKVDEC,
	MPP_DRIVER_RKVENC,
	MPP_DRIVER_IEP,
	MPP_DRIVER_IEP2,
	MPP_DRIVER_JPGDEC,
	MPP_DRIVER_RKVDEC2,
	MPP_DRIVER_RKVENC2,
	MPP_DRIVER_AV1DEC,
	MPP_DRIVER_BUTT, /* sentinel: number of drivers */
};
91
/**
 * Command type: keep the same as user space
 *
 * The ioctl command space is split into ranges of 0x100:
 * query (0x0xx), init (0x1xx), send (0x2xx), poll (0x3xx) and
 * control (0x4xx). Each range ends with its own _BUTT sentinel.
 */
enum MPP_DEV_COMMAND_TYPE {
	/* hardware capability queries */
	MPP_CMD_QUERY_BASE = 0,
	MPP_CMD_QUERY_HW_SUPPORT = MPP_CMD_QUERY_BASE + 0,
	MPP_CMD_QUERY_HW_ID = MPP_CMD_QUERY_BASE + 1,
	MPP_CMD_QUERY_CMD_SUPPORT = MPP_CMD_QUERY_BASE + 2,
	MPP_CMD_QUERY_BUTT,

	/* session initialization */
	MPP_CMD_INIT_BASE = 0x100,
	MPP_CMD_INIT_CLIENT_TYPE = MPP_CMD_INIT_BASE + 0,
	MPP_CMD_INIT_DRIVER_DATA = MPP_CMD_INIT_BASE + 1,
	MPP_CMD_INIT_TRANS_TABLE = MPP_CMD_INIT_BASE + 2,
	MPP_CMD_INIT_BUTT,

	/* task submission (register write/read payloads) */
	MPP_CMD_SEND_BASE = 0x200,
	MPP_CMD_SET_REG_WRITE = MPP_CMD_SEND_BASE + 0,
	MPP_CMD_SET_REG_READ = MPP_CMD_SEND_BASE + 1,
	MPP_CMD_SET_REG_ADDR_OFFSET = MPP_CMD_SEND_BASE + 2,
	MPP_CMD_SET_RCB_INFO = MPP_CMD_SEND_BASE + 3,
	MPP_CMD_SET_SESSION_FD = MPP_CMD_SEND_BASE + 4,
	MPP_CMD_SEND_BUTT,

	/* task result polling */
	MPP_CMD_POLL_BASE = 0x300,
	MPP_CMD_POLL_HW_FINISH = MPP_CMD_POLL_BASE + 0,
	MPP_CMD_POLL_HW_IRQ = MPP_CMD_POLL_BASE + 1,
	MPP_CMD_POLL_BUTT,

	/* miscellaneous session control */
	MPP_CMD_CONTROL_BASE = 0x400,
	MPP_CMD_RESET_SESSION = MPP_CMD_CONTROL_BASE + 0,
	MPP_CMD_TRANS_FD_TO_IOVA = MPP_CMD_CONTROL_BASE + 1,
	MPP_CMD_RELEASE_FD = MPP_CMD_CONTROL_BASE + 2,
	MPP_CMD_SEND_CODEC_INFO = MPP_CMD_CONTROL_BASE + 3,
	MPP_CMD_CONTROL_BUTT,

	MPP_CMD_BUTT,
};
130
/* clock rate selection modes; see mpp_clk_info rate fields */
enum MPP_CLOCK_MODE {
	CLK_MODE_BASE = 0,
	CLK_MODE_DEFAULT = CLK_MODE_BASE,
	CLK_MODE_DEBUG,    /* rate forced via debug interface */
	CLK_MODE_REDUCE,   /* lowered rate while hardware is idle */
	CLK_MODE_NORMAL,   /* regular working rate from dtsi */
	CLK_MODE_ADVANCED, /* high performance rate from dtsi */
	CLK_MODE_BUTT,
};
140
/* reset line identifiers, index into mpp_reset_group::resets[] */
enum MPP_RESET_TYPE {
	RST_TYPE_BASE = 0,
	RST_TYPE_A = RST_TYPE_BASE, /* axi reset */
	RST_TYPE_H,                 /* ahb reset */
	RST_TYPE_NIU_A,             /* niu axi reset */
	RST_TYPE_NIU_H,             /* niu ahb reset */
	RST_TYPE_CORE,
	RST_TYPE_CABAC,
	RST_TYPE_HEVC_CABAC,
	RST_TYPE_BUTT,
};
152
/* encoder codec-info item ids, reported via MPP_CMD_SEND_CODEC_INFO */
enum ENC_INFO_TYPE {
	ENC_INFO_BASE = 0,
	ENC_INFO_WIDTH,
	ENC_INFO_HEIGHT,
	ENC_INFO_FORMAT,
	ENC_INFO_FPS_IN,
	ENC_INFO_FPS_OUT,
	ENC_INFO_RC_MODE,
	ENC_INFO_BITRATE,
	ENC_INFO_GOP_SIZE,
	ENC_INFO_FPS_CALC,
	ENC_INFO_PROFILE,

	ENC_INFO_BUTT,
};
168
/* decoder codec-info item ids, reported via MPP_CMD_SEND_CODEC_INFO */
enum DEC_INFO_TYPE {
	DEC_INFO_BASE = 0,
	DEC_INFO_WIDTH,
	DEC_INFO_HEIGHT,
	DEC_INFO_FORMAT,
	DEC_INFO_BITDEPTH,
	DEC_INFO_FPS,

	DEC_INFO_BUTT,
};
179
/* value encoding of a codec_info_elem::data field */
enum CODEC_INFO_FLAGS {
	CODEC_INFO_FLAG_NULL = 0,
	CODEC_INFO_FLAG_NUMBER, /* data holds a number */
	CODEC_INFO_FLAG_STRING, /* data holds string payload */

	CODEC_INFO_FLAG_BUTT,
};
187
/* forward declarations; full definitions appear later in this header */
struct mpp_task;
struct mpp_session;
struct mpp_dma_session;
struct mpp_taskqueue;
192
/*
 * data common struct for parse out
 * One userspace request, as decoded from an ioctl: a command id plus a
 * user-memory region (size bytes at data, applied at register offset).
 * Layout is shared with userspace; do not reorder fields.
 */
struct mpp_request {
	__u32 cmd;          /* enum MPP_DEV_COMMAND_TYPE */
	__u32 flags;        /* MPP_FLAGS_* bits */
	__u32 size;         /* payload size in bytes */
	__u32 offset;       /* register/payload offset */
	void __user *data;  /* userspace payload pointer */
};
201
/*
 * struct use to collect task set and poll message
 * Groups up to MPP_MAX_MSG_NUM requests of one ioctl batch together with
 * the session/queue/device/task they resolve to.
 */
struct mpp_task_msgs {
	/* for ioctl msgs bat process */
	struct list_head list;
	struct list_head list_session;

	struct mpp_session *session;
	struct mpp_taskqueue *queue;
	struct mpp_task *task;
	struct mpp_dev *mpp;

	/* for fd reference */
	int ext_fd;
	struct fd f;

	u32 flags;    /* accumulated MPP_FLAGS_* of the batch */
	u32 req_cnt;  /* number of valid entries in reqs[] */
	u32 set_cnt;  /* number of set (send) requests */
	u32 poll_cnt; /* number of poll requests */

	struct mpp_request reqs[MPP_MAX_MSG_NUM];
	struct mpp_request *poll_req; /* points at the poll entry in reqs[] */
};
225
/* one GRF (general register file) setting: value @val at register @offset */
struct mpp_grf_info {
	u32 offset;
	u32 val;            /* write-mask format; low 16 bits are the value */
	struct regmap *grf; /* syscon regmap handle */
};
231
/**
 * struct for hardware info
 *
 * Register indices below are word indices (register number), not byte
 * offsets.
 */
struct mpp_hw_info {
	/* register number */
	u32 reg_num;
	/* register index that holds the hardware id */
	int reg_id;
	/* hardware id value read back from reg_id */
	u32 hw_id;
	/* start index of register */
	u32 reg_start;
	/* end index of register */
	u32 reg_end;
	/* register of enable hardware */
	int reg_en;
};
248
/* table of register indices whose values are dma-buf fds to translate */
struct mpp_trans_info {
	const int count;          /* entries in table */
	const u16 * const table;  /* register indices */
};

/* one extra address offset to apply to register @index */
struct reg_offset_elem {
	u32 index;
	u32 offset;
};

/* collection of user-supplied register address offsets for one task */
struct reg_offset_info {
	u32 cnt;
	struct reg_offset_elem elem[MPP_MAX_REG_TRANS_NUM];
};

/* one codec-info record; layout shared with userspace */
struct codec_info_elem {
	__u32 type;  /* ENC_INFO_TYPE or DEC_INFO_TYPE */
	__u32 flag;  /* enum CODEC_INFO_FLAGS */
	__u64 data;
};
269
/* one device clock and its per-mode rates (see enum MPP_CLOCK_MODE) */
struct mpp_clk_info {
	struct clk *clk;

	/* debug rate, from debug */
	u32 debug_rate_hz;
	/* normal rate, from dtsi */
	u32 normal_rate_hz;
	/* high performance rate, from dtsi */
	u32 advanced_rate_hz;

	u32 default_rate_hz;
	u32 reduce_rate_hz;
	/* record last used rate */
	u32 used_rate_hz;
};
285
/* static per-variant description, bound to a device at probe time */
struct mpp_dev_var {
	enum MPP_DEVICE_TYPE device_type;

	/* info for each hardware */
	struct mpp_hw_info *hw_info;
	struct mpp_trans_info *trans_info;
	struct mpp_hw_ops *hw_ops;   /* clock/reset/freq callbacks */
	struct mpp_dev_ops *dev_ops; /* task lifecycle callbacks */
};
295
/* one imported dma-buf mapping used by a task */
struct mpp_mem_region {
	struct list_head reg_link; /* link in mpp_task::mem_region_list */
	/* address for iommu */
	dma_addr_t iova;
	unsigned long len;
	u32 reg_idx; /* register index that referenced this buffer */
	void *hdl;   /* opaque import handle */
	int fd;      /* originating dma-buf fd */
	/* whether is dup import entity */
	bool is_dup;
};
307
308
/*
 * Per-hardware-instance device context. One mpp_dev exists per probed
 * video codec block; it is attached to a shared mpp_taskqueue and to the
 * global mpp_service.
 */
struct mpp_dev {
	struct device *dev;
	const struct mpp_dev_var *var; /* static variant description */
	struct mpp_hw_ops *hw_ops;
	struct mpp_dev_ops *dev_ops;

	/* per-device work for attached taskqueue */
	struct kthread_work work;
	/* the flag for auto get/set/reduce freq */
	bool auto_freq_en;
	/* the flag for pmu idle request before device reset */
	bool skip_idle;

	/*
	 * The task capacity is the task queue length that hardware can accept.
	 * Default 1 means normal hardware can only accept one task at once.
	 */
	u32 task_capacity;
	/*
	 * The message capacity is the max message parallel process capacity.
	 * Default 1 means normal hardware can only accept one message at one
	 * shot ioctl.
	 * Multi-core hardware can accept more message at one shot ioctl.
	 */
	u32 msgs_cap;

	int irq;
	u32 irq_status; /* latched status from the irq top-half */

	void __iomem *reg_base; /* mapped register window */
	struct mpp_grf_info *grf_info;
	struct mpp_iommu_info *iommu_info;

	atomic_t reset_request;
	atomic_t session_index;
	atomic_t task_count;
	atomic_t task_index;
	/* current task in running */
	struct mpp_task *cur_task;
	/* set session max buffers */
	u32 session_max_buffers;
	struct mpp_taskqueue *queue;
	struct mpp_reset_group *reset_group;
	/* point to MPP Service */
	struct mpp_service *srv;

	/* multi-core data */
	struct list_head queue_link; /* link in taskqueue dev_list */
	s32 core_id;
};
359
/*
 * Per-open-file client context. Created when userspace opens the device
 * node; owns its pending task list, fd translate table and message pools.
 */
struct mpp_session {
	enum MPP_DEVICE_TYPE device_type; /* selected via MPP_CMD_INIT_CLIENT_TYPE */
	u32 index;
	/* the session related device private data */
	struct mpp_service *srv;
	struct mpp_dev *mpp;
	struct mpp_dma_session *dma;

	/* lock for session task pending list */
	struct mutex pending_lock;
	/* task pending list in session */
	struct list_head pending_list;

	pid_t pid;
	atomic_t task_count;
	atomic_t release_request;
	/* trans info set by user */
	int trans_count;
	u16 trans_table[MPP_MAX_REG_TRANS_NUM];
	u32 msg_flags;
	/* link to mpp_service session_list */
	struct list_head service_link;
	/* link to mpp_workqueue session_attach / session_detach */
	struct list_head session_link;
	/* private data */
	void *priv;

	/*
	 * session handler from mpp_dev_ops
	 * process_task - handle messages of sending task
	 * wait_result - handle messages of polling task
	 * deinit - handle session deinit
	 */
	int (*process_task)(struct mpp_session *session,
			    struct mpp_task_msgs *msgs);
	int (*wait_result)(struct mpp_session *session,
			   struct mpp_task_msgs *msgs);
	void (*deinit)(struct mpp_session *session);

	/* max message count */
	int msgs_cnt;
	struct list_head list_msgs;      /* in-flight msg batches */
	struct list_head list_msgs_idle; /* recycled msg batches */
	spinlock_t lock_msgs;            /* protects the two msg lists */
};
405
/*
 * task state in work thread
 * Bit positions stored in mpp_task::state (an unsigned long bitmap), so a
 * task can carry several states at once (e.g. FINISH and DONE).
 */
enum mpp_task_state {
	TASK_STATE_PENDING = 0,
	TASK_STATE_RUNNING = 1,
	TASK_STATE_START = 2,
	TASK_STATE_HANDLE = 3,
	TASK_STATE_IRQ = 4,
	TASK_STATE_FINISH = 5,
	TASK_STATE_TIMEOUT = 6,
	TASK_STATE_DONE = 7,

	TASK_STATE_PREPARE = 8,
	TASK_STATE_ABORT = 9,
	TASK_STATE_ABORT_READY = 10,
	TASK_STATE_PROC_DONE = 11,
};
422
/* The context for a single hardware task */
struct mpp_task {
	/* context belong to */
	struct mpp_session *session;

	/* link to pending list in session */
	struct list_head pending_link;
	/* link to done list in session */
	struct list_head done_link;
	/* link to list in taskqueue */
	struct list_head queue_link;
	/* The DMA buffer used in this task */
	struct list_head mem_region_list;
	u32 mem_count; /* used entries of mem_regions[] */
	struct mpp_mem_region mem_regions[MPP_MAX_REG_TRANS_NUM];

	/* state in the taskqueue (bitmap of enum mpp_task_state) */
	unsigned long state;
	atomic_t abort_request;
	/* delayed work for hardware timeout */
	struct delayed_work timeout_work;
	struct kref ref; /* released through mpp_free_task() */

	/* record context running start time */
	ktime_t start;
	ktime_t part; /* last checkpoint for mpp_time_part_diff() */
	/* hardware info for current task */
	struct mpp_hw_info *hw_info;
	u32 task_index;
	u32 task_id;
	u32 *reg; /* register image to program / read back */
	/* event for session wait thread */
	wait_queue_head_t wait;

	/* for multi-core */
	struct mpp_dev *mpp; /* core this task ran on; may be NULL before dispatch */
	s32 core_id;
};
461
/*
 * A scheduling queue shared by one or more devices (cores). Owns the
 * kthread worker that dispatches pending tasks to attached hardware.
 */
struct mpp_taskqueue {
	/* kworker for attached taskqueue */
	struct kthread_worker worker;
	/* task for work queue */
	struct task_struct *kworker_task;

	/* lock for session attach and session_detach */
	struct mutex session_lock;
	/* link to session session_link for attached sessions */
	struct list_head session_attach;
	/* link to session session_link for detached sessions */
	struct list_head session_detach;
	u32 detach_count;

	atomic_t task_id;
	/* lock for pending list */
	struct mutex pending_lock;
	struct list_head pending_list;
	/* lock for running list */
	spinlock_t running_lock;
	struct list_head running_list;

	/* point to MPP Service */
	struct mpp_service *srv;
	/* lock for mmu list */
	struct mutex mmu_lock;
	struct list_head mmu_list;
	/* lock for dev list */
	struct mutex dev_lock;
	struct list_head dev_list;
	/*
	 * task_capacity in taskqueue is the minimum task capacity of the
	 * device task capacity which is attached to the taskqueue
	 */
	u32 task_capacity;

	/* multi-core task distribution */
	atomic_t reset_request;
	struct mpp_dev *cores[MPP_MAX_CORE_NUM];
	unsigned long core_idle; /* bitmap: set bit = core is idle */
	u32 core_count;
};
504
/* reset lines shared by a group of devices, serialized via rw_sem */
struct mpp_reset_group {
	/* the flag for whether use rw_sem */
	u32 rw_sem_on;
	struct rw_semaphore rw_sem;
	struct reset_control *resets[RST_TYPE_BUTT]; /* indexed by MPP_RESET_TYPE */
	/* for set rw_sem */
	struct mpp_taskqueue *queue;
};
513
/*
 * Top-level service singleton: owns the character device, registers the
 * per-codec sub-drivers and tracks all sessions, queues and reset groups.
 */
struct mpp_service {
	struct class *cls;
	struct device *dev;
	dev_t dev_id;
	struct cdev mpp_cdev;
	struct device *child_dev;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	unsigned long hw_support; /* bitmap of enum MPP_DEVICE_TYPE */
	atomic_t shutdown_request;
	/* follows for device probe */
	struct mpp_grf_info grf_infos[MPP_DRIVER_BUTT];
	struct platform_driver *sub_drivers[MPP_DRIVER_BUTT];
	/* follows for attach service */
	struct mpp_dev *sub_devices[MPP_DEVICE_BUTT];
	u32 taskqueue_cnt;
	struct mpp_taskqueue *task_queues[MPP_DEVICE_BUTT];
	u32 reset_group_cnt;
	struct mpp_reset_group *reset_groups[MPP_DEVICE_BUTT];

	/* lock for session list */
	struct mutex session_lock;
	struct list_head session_list;
	u32 session_count;
};
540
/*
 * struct mpp_hw_ops - context specific operations for device
 * @init	Do something when hardware probe.
 * @exit	Do something when hardware remove.
 * @clk_on	Enable clocks.
 * @clk_off	Disable clocks.
 * @get_freq	Get special freq for setting.
 * @set_freq	Set freq to hardware.
 * @reduce_freq	Reduce freq when hardware is not running.
 * @reset	When error, reset hardware.
 * @set_grf	Apply the device's GRF configuration.
 */
struct mpp_hw_ops {
	int (*init)(struct mpp_dev *mpp);
	int (*exit)(struct mpp_dev *mpp);
	int (*clk_on)(struct mpp_dev *mpp);
	int (*clk_off)(struct mpp_dev *mpp);
	int (*get_freq)(struct mpp_dev *mpp,
			struct mpp_task *mpp_task);
	int (*set_freq)(struct mpp_dev *mpp,
			struct mpp_task *mpp_task);
	int (*reduce_freq)(struct mpp_dev *mpp);
	int (*reset)(struct mpp_dev *mpp);
	int (*set_grf)(struct mpp_dev *mpp);
};
565
/*
 * struct mpp_dev_ops - context specific operations for task
 * @process_task	Handle a batch of send messages for a session.
 * @wait_result	Handle a batch of poll messages for a session.
 * @deinit	Session teardown entry point.
 * @task_worker	Custom kthread work function for the taskqueue.
 * @alloc_task	Alloc and set task.
 * @prepare	Check HW status for determining run next task or not.
 * @run	Start a single {en,de}coding run. Set registers to hardware.
 * @irq	Deal with hardware interrupt top-half.
 * @isr	Deal with hardware interrupt bottom-half.
 * @finish	Read back processing results and additional data from hardware.
 * @result	Read status to userspace.
 * @free_task	Release the resource allocate which alloc.
 * @ioctl	Special command from userspace.
 * @init_session	extra initialization on session init.
 * @free_session	extra cleanup on session deinit.
 * @dump_session	information dump for session.
 * @dump_dev	information dump for hardware device.
 */
struct mpp_dev_ops {
	int (*process_task)(struct mpp_session *session,
			    struct mpp_task_msgs *msgs);
	int (*wait_result)(struct mpp_session *session,
			   struct mpp_task_msgs *msgs);
	void (*deinit)(struct mpp_session *session);
	void (*task_worker)(struct kthread_work *work_s);

	void *(*alloc_task)(struct mpp_session *session,
			    struct mpp_task_msgs *msgs);
	void *(*prepare)(struct mpp_dev *mpp, struct mpp_task *task);
	int (*run)(struct mpp_dev *mpp, struct mpp_task *task);
	int (*irq)(struct mpp_dev *mpp);
	int (*isr)(struct mpp_dev *mpp);
	int (*finish)(struct mpp_dev *mpp, struct mpp_task *task);
	int (*result)(struct mpp_dev *mpp, struct mpp_task *task,
		      struct mpp_task_msgs *msgs);
	int (*free_task)(struct mpp_session *session,
			 struct mpp_task *task);
	int (*ioctl)(struct mpp_session *session, struct mpp_request *req);
	int (*init_session)(struct mpp_session *session);
	int (*free_session)(struct mpp_session *session);
	int (*dump_session)(struct mpp_session *session, struct seq_file *seq);
	int (*dump_dev)(struct mpp_dev *mpp);
};
607
/* taskqueue lifecycle */
struct mpp_taskqueue *mpp_taskqueue_init(struct device *dev);

/* fd import and register address translation */
struct mpp_mem_region *
mpp_task_attach_fd(struct mpp_task *task, int fd);
int mpp_translate_reg_address(struct mpp_session *session,
			      struct mpp_task *task, int fmt,
			      u32 *reg, struct reg_offset_info *off_inf);

/* request validation and register offset bookkeeping */
int mpp_check_req(struct mpp_request *req, int base,
		  int max_size, u32 off_s, u32 off_e);
int mpp_extract_reg_offset_info(struct reg_offset_info *off_inf,
				struct mpp_request *req);
int mpp_query_reg_offset_info(struct reg_offset_info *off_inf,
			      u32 index);
int mpp_translate_reg_offset_info(struct mpp_task *task,
				  struct reg_offset_info *off_inf,
				  u32 *reg);

/* task lifecycle */
int mpp_task_init(struct mpp_session *session,
		  struct mpp_task *task);
int mpp_task_finish(struct mpp_session *session,
		    struct mpp_task *task);
int mpp_task_finalize(struct mpp_session *session,
		      struct mpp_task *task);
int mpp_task_dump_mem_region(struct mpp_dev *mpp,
			     struct mpp_task *task);
int mpp_task_dump_reg(struct mpp_dev *mpp,
		      struct mpp_task *task);
int mpp_task_dump_hw_reg(struct mpp_dev *mpp);
void mpp_free_task(struct kref *ref);

/* session teardown */
int mpp_session_deinit(struct mpp_session *session);

/* device probe/remove/registration */
int mpp_dev_probe(struct mpp_dev *mpp,
		  struct platform_device *pdev);
int mpp_dev_remove(struct mpp_dev *mpp);
void mpp_dev_shutdown(struct platform_device *pdev);
int mpp_dev_register_srv(struct mpp_dev *mpp, struct mpp_service *srv);

/* power and reset control */
int mpp_power_on(struct mpp_dev *mpp);
int mpp_power_off(struct mpp_dev *mpp);
int mpp_dev_reset(struct mpp_dev *mpp);

/* shared interrupt entry points */
irqreturn_t mpp_dev_irq(int irq, void *param);
irqreturn_t mpp_dev_isr_sched(int irq, void *param);

struct reset_control *mpp_reset_control_get(struct mpp_dev *mpp,
					    enum MPP_RESET_TYPE type,
					    const char *name);

/* GRF helpers */
u32 mpp_get_grf(struct mpp_grf_info *grf_info);
bool mpp_grf_is_changed(struct mpp_grf_info *grf_info);
int mpp_set_grf(struct mpp_grf_info *grf_info);

/* task timing statistics */
int mpp_time_record(struct mpp_task *task);
int mpp_time_diff(struct mpp_task *task);
int mpp_time_part_diff(struct mpp_task *task);

/* bulk register write/read over an index range */
int mpp_write_req(struct mpp_dev *mpp, u32 *regs,
		  u32 start_idx, u32 end_idx, u32 en_idx);
int mpp_read_req(struct mpp_dev *mpp, u32 *regs,
		 u32 start_idx, u32 end_idx);

/* clock rate management (see enum MPP_CLOCK_MODE) */
int mpp_get_clk_info(struct mpp_dev *mpp,
		     struct mpp_clk_info *clk_info,
		     const char *name);
int mpp_set_clk_info_rate_hz(struct mpp_clk_info *clk_info,
			     enum MPP_CLOCK_MODE mode,
			     unsigned long val);
unsigned long mpp_get_clk_info_rate_hz(struct mpp_clk_info *clk_info,
				       enum MPP_CLOCK_MODE mode);
int mpp_clk_set_rate(struct mpp_clk_info *clk_info,
		     enum MPP_CLOCK_MODE mode);
680
/*
 * Write @val to the register at byte offset @reg from mpp->reg_base,
 * with full ordering (writel). Logs the access when DEBUG_SET_REG is on.
 * Always returns 0.
 */
static inline int mpp_write(struct mpp_dev *mpp, u32 reg, u32 val)
{
	/* word index, for the debug print only */
	int idx = reg / sizeof(u32);

	mpp_debug(DEBUG_SET_REG,
		  "write reg[%03d]: %04x: 0x%08x\n", idx, reg, val);
	writel(val, mpp->reg_base + reg);

	return 0;
}
691
/*
 * Same as mpp_write() but without a memory barrier (writel_relaxed);
 * use when ordering against other memory accesses is not required.
 * Always returns 0.
 */
static inline int mpp_write_relaxed(struct mpp_dev *mpp, u32 reg, u32 val)
{
	/* word index, for the debug print only */
	int idx = reg / sizeof(u32);

	mpp_debug(DEBUG_SET_REG,
		  "write reg[%03d]: %04x: 0x%08x\n", idx, reg, val);
	writel_relaxed(val, mpp->reg_base + reg);

	return 0;
}
702
/*
 * Read the register at byte offset @reg from mpp->reg_base with full
 * ordering (readl). Logs the access when DEBUG_GET_REG is on.
 */
static inline u32 mpp_read(struct mpp_dev *mpp, u32 reg)
{
	u32 val = 0;
	/* word index, for the debug print only */
	int idx = reg / sizeof(u32);

	val = readl(mpp->reg_base + reg);
	mpp_debug(DEBUG_GET_REG,
		  "read reg[%03d]: %04x: 0x%08x\n", idx, reg, val);

	return val;
}
714
/*
 * Same as mpp_read() but without a memory barrier (readl_relaxed);
 * use when ordering against other memory accesses is not required.
 *
 * Fix: debug format now matches mpp_read()'s "reg[%03d]: %04x" form
 * (the colon was missing), keeping register traces grep-consistent.
 */
static inline u32 mpp_read_relaxed(struct mpp_dev *mpp, u32 reg)
{
	u32 val = 0;
	/* word index, for the debug print only */
	int idx = reg / sizeof(u32);

	val = readl_relaxed(mpp->reg_base + reg);
	mpp_debug(DEBUG_GET_REG,
		  "read reg[%03d]: %04x: 0x%08x\n", idx, reg, val);

	return val;
}
726
/* Assert @rst when present; a NULL reset control is tolerated. Returns 0. */
static inline int mpp_safe_reset(struct reset_control *rst)
{
	if (!rst)
		return 0;

	reset_control_assert(rst);

	return 0;
}
734
/* Deassert @rst when present; a NULL reset control is tolerated. Returns 0. */
static inline int mpp_safe_unreset(struct reset_control *rst)
{
	if (!rst)
		return 0;

	reset_control_deassert(rst);

	return 0;
}
742
/*
 * Prepare and enable @clk when present; a NULL clock is tolerated.
 *
 * Fix: the int result of clk_prepare_enable() was silently discarded;
 * propagate it so callers can detect enable failures. Still returns 0
 * on success or when @clk is NULL, so existing callers are unaffected.
 */
static inline int mpp_clk_safe_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_prepare_enable(clk);
}
750
/* Disable and unprepare @clk when present; NULL is tolerated. Returns 0. */
static inline int mpp_clk_safe_disable(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_disable_unprepare(clk);

	return 0;
}
758
mpp_reset_down_read(struct mpp_reset_group * group)759 static inline int mpp_reset_down_read(struct mpp_reset_group *group)
760 {
761 if (group && group->rw_sem_on)
762 down_read(&group->rw_sem);
763
764 return 0;
765 }
766
mpp_reset_up_read(struct mpp_reset_group * group)767 static inline int mpp_reset_up_read(struct mpp_reset_group *group)
768 {
769 if (group && group->rw_sem_on)
770 up_read(&group->rw_sem);
771
772 return 0;
773 }
774
mpp_reset_down_write(struct mpp_reset_group * group)775 static inline int mpp_reset_down_write(struct mpp_reset_group *group)
776 {
777 if (group && group->rw_sem_on)
778 down_write(&group->rw_sem);
779
780 return 0;
781 }
782
mpp_reset_up_write(struct mpp_reset_group * group)783 static inline int mpp_reset_up_write(struct mpp_reset_group *group)
784 {
785 if (group && group->rw_sem_on)
786 up_write(&group->rw_sem);
787
788 return 0;
789 }
790
/*
 * Ask the PMU to (un)idle this device, unless the device opted out via
 * skip_idle (in which case 0 is returned without touching the PMU).
 */
static inline int mpp_pmu_idle_request(struct mpp_dev *mpp, bool idle)
{
	return mpp->skip_idle ? 0 : rockchip_pmu_idle_request(mpp->dev, idle);
}
798
799 static inline struct mpp_dev *
mpp_get_task_used_device(const struct mpp_task * task,const struct mpp_session * session)800 mpp_get_task_used_device(const struct mpp_task *task,
801 const struct mpp_session *session)
802 {
803 return task->mpp ? task->mpp : session->mpp;
804 }
805
806 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
807 struct proc_dir_entry *
808 mpp_procfs_create_u32(const char *name, umode_t mode,
809 struct proc_dir_entry *parent, void *data);
810 #else
811 static inline struct proc_dir_entry *
mpp_procfs_create_u32(const char * name,umode_t mode,struct proc_dir_entry * parent,void * data)812 mpp_procfs_create_u32(const char *name, umode_t mode,
813 struct proc_dir_entry *parent, void *data)
814 {
815 return 0;
816 }
817 #endif
818
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
/* name tables used by the procfs dump code */
extern const char *mpp_device_name[MPP_DEVICE_BUTT];
extern const char *enc_info_item_name[ENC_INFO_BUTT];
#endif

/* file operations of the mpp service character device */
extern const struct file_operations rockchip_mpp_fops;

/* per-codec platform drivers registered by the service */
extern struct platform_driver rockchip_rkvdec_driver;
extern struct platform_driver rockchip_rkvenc_driver;
extern struct platform_driver rockchip_vdpu1_driver;
extern struct platform_driver rockchip_vepu1_driver;
extern struct platform_driver rockchip_vdpu2_driver;
extern struct platform_driver rockchip_vepu2_driver;
extern struct platform_driver rockchip_vepu22_driver;
extern struct platform_driver rockchip_iep2_driver;
extern struct platform_driver rockchip_jpgdec_driver;
extern struct platform_driver rockchip_rkvdec2_driver;
extern struct platform_driver rockchip_rkvenc2_driver;
extern struct platform_driver rockchip_av1dec_driver;
extern struct platform_driver rockchip_av1_iommu_driver;

/* AV1 decoder virtual-bus helpers */
extern struct platform_device *av1dec_device_create(void);
extern int av1dec_driver_register(struct platform_driver *drv);
extern struct bus_type av1dec_bus;
844 #endif
845