1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
4 *
5 * author:
6 * Alpha Lin, alpha.lin@rock-chips.com
7 * Randy Li, randy.li@rock-chips.com
8 * Ding Wei, leo.ding@rock-chips.com
9 *
10 */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/clk.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/iopoll.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/of_platform.h>
21 #include <linux/of_irq.h>
22 #include <linux/proc_fs.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/poll.h>
25 #include <linux/regmap.h>
26 #include <linux/rwsem.h>
27 #include <linux/mfd/syscon.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/uaccess.h>
31 #include <linux/nospec.h>
32
33 #include <soc/rockchip/pm_domains.h>
34
35 #include "mpp_debug.h"
36 #include "mpp_common.h"
37 #include "mpp_iommu.h"
38
39 #define MPP_WORK_TIMEOUT_DELAY (200)
40 #define MPP_WAIT_TIMEOUT_DELAY (2000)
41
42 /* Use 'v' as magic number */
43 #define MPP_IOC_MAGIC 'v'
44
45 #define MPP_IOC_CFG_V1 _IOW(MPP_IOC_MAGIC, 1, unsigned int)
46 #define MPP_IOC_CFG_V2 _IOW(MPP_IOC_MAGIC, 2, unsigned int)
47
48 /* input parameter structure for version 1 */
49 struct mpp_msg_v1 {
50 __u32 cmd;
51 __u32 flags;
52 __u32 size;
53 __u32 offset;
54 __u64 data_ptr;
55 };
56
57 #define MPP_BAT_MSG_DONE (0x00000001)
58
59 struct mpp_bat_msg {
60 __u64 flag;
61 __u32 fd;
62 __s32 ret;
63 };
64
65 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
66 const char *mpp_device_name[MPP_DEVICE_BUTT] = {
67 [MPP_DEVICE_VDPU1] = "VDPU1",
68 [MPP_DEVICE_VDPU2] = "VDPU2",
69 [MPP_DEVICE_VDPU1_PP] = "VDPU1_PP",
70 [MPP_DEVICE_VDPU2_PP] = "VDPU2_PP",
71 [MPP_DEVICE_AV1DEC] = "AV1DEC",
72 [MPP_DEVICE_HEVC_DEC] = "HEVC_DEC",
73 [MPP_DEVICE_RKVDEC] = "RKVDEC",
74 [MPP_DEVICE_AVSPLUS_DEC] = "AVSPLUS_DEC",
75 [MPP_DEVICE_RKVENC] = "RKVENC",
76 [MPP_DEVICE_VEPU1] = "VEPU1",
77 [MPP_DEVICE_VEPU2] = "VEPU2",
78 [MPP_DEVICE_VEPU22] = "VEPU22",
79 [MPP_DEVICE_IEP2] = "IEP2",
80 };
81
82 const char *enc_info_item_name[ENC_INFO_BUTT] = {
83 [ENC_INFO_BASE] = "null",
84 [ENC_INFO_WIDTH] = "width",
85 [ENC_INFO_HEIGHT] = "height",
86 [ENC_INFO_FORMAT] = "format",
87 [ENC_INFO_FPS_IN] = "fps_in",
88 [ENC_INFO_FPS_OUT] = "fps_out",
89 [ENC_INFO_RC_MODE] = "rc_mode",
90 [ENC_INFO_BITRATE] = "bitrate",
91 [ENC_INFO_GOP_SIZE] = "gop_size",
92 [ENC_INFO_FPS_CALC] = "fps_calc",
93 [ENC_INFO_PROFILE] = "profile",
94 };
95
96 #endif
97
98 static void mpp_attach_workqueue(struct mpp_dev *mpp,
99 struct mpp_taskqueue *queue);
100
101 static int
102 mpp_taskqueue_pop_pending(struct mpp_taskqueue *queue,
103 struct mpp_task *task)
104 {
105 if (!task->session || !task->session->mpp)
106 return -EINVAL;
107
108 mutex_lock(&queue->pending_lock);
109 list_del_init(&task->queue_link);
110 mutex_unlock(&queue->pending_lock);
111 kref_put(&task->ref, mpp_free_task);
112
113 return 0;
114 }
115
116 static struct mpp_task *
117 mpp_taskqueue_get_pending_task(struct mpp_taskqueue *queue)
118 {
119 struct mpp_task *task = NULL;
120
121 mutex_lock(&queue->pending_lock);
122 task = list_first_entry_or_null(&queue->pending_list,
123 struct mpp_task,
124 queue_link);
125 mutex_unlock(&queue->pending_lock);
126
127 return task;
128 }
129
130 static bool
131 mpp_taskqueue_is_running(struct mpp_taskqueue *queue)
132 {
133 unsigned long flags;
134 bool flag;
135
136 spin_lock_irqsave(&queue->running_lock, flags);
137 flag = !list_empty(&queue->running_list);
138 spin_unlock_irqrestore(&queue->running_lock, flags);
139
140 return flag;
141 }
142
143 static int
144 mpp_taskqueue_pending_to_run(struct mpp_taskqueue *queue,
145 struct mpp_task *task)
146 {
147 unsigned long flags;
148
149 mutex_lock(&queue->pending_lock);
150 spin_lock_irqsave(&queue->running_lock, flags);
151 list_move_tail(&task->queue_link, &queue->running_list);
152 spin_unlock_irqrestore(&queue->running_lock, flags);
153
154 mutex_unlock(&queue->pending_lock);
155
156 return 0;
157 }
158
159 static struct mpp_task *
160 mpp_taskqueue_get_running_task(struct mpp_taskqueue *queue)
161 {
162 unsigned long flags;
163 struct mpp_task *task = NULL;
164
165 spin_lock_irqsave(&queue->running_lock, flags);
166 task = list_first_entry_or_null(&queue->running_list,
167 struct mpp_task,
168 queue_link);
169 spin_unlock_irqrestore(&queue->running_lock, flags);
170
171 return task;
172 }
173
174 static int
175 mpp_taskqueue_pop_running(struct mpp_taskqueue *queue,
176 struct mpp_task *task)
177 {
178 unsigned long flags;
179
180 if (!task->session || !task->session->mpp)
181 return -EINVAL;
182
183 spin_lock_irqsave(&queue->running_lock, flags);
184 list_del_init(&task->queue_link);
185 spin_unlock_irqrestore(&queue->running_lock, flags);
186 kref_put(&task->ref, mpp_free_task);
187
188 return 0;
189 }
190
191 static void
192 mpp_taskqueue_trigger_work(struct mpp_dev *mpp)
193 {
194 kthread_queue_work(&mpp->queue->worker, &mpp->work);
195 }
196
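/* Bring the device up: take a runtime PM reference, hold a wakeup source and turn the clocks on. */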
197 int mpp_power_on(struct mpp_dev *mpp)
198 {
199 pm_runtime_get_sync(mpp->dev);
200 pm_stay_awake(mpp->dev);
201
202 if (mpp->hw_ops->clk_on)
203 mpp->hw_ops->clk_on(mpp);
204
205 return 0;
206 }
207
208 int mpp_power_off(struct mpp_dev *mpp)
209 {
210 if (mpp->hw_ops->clk_off)
211 mpp->hw_ops->clk_off(mpp);
212
213 pm_relax(mpp->dev);
214 if (mpp_taskqueue_get_pending_task(mpp->queue) ||
215 mpp_taskqueue_get_running_task(mpp->queue)) {
216 pm_runtime_mark_last_busy(mpp->dev);
217 pm_runtime_put_autosuspend(mpp->dev);
218 } else {
219 pm_runtime_put_sync_suspend(mpp->dev);
220 }
221
222 return 0;
223 }
224
225 static void task_msgs_reset(struct mpp_task_msgs *msgs)
226 {
227 list_del_init(&msgs->list);
228
229 msgs->flags = 0;
230 msgs->req_cnt = 0;
231 msgs->set_cnt = 0;
232 msgs->poll_cnt = 0;
233 }
234
235 static void task_msgs_init(struct mpp_task_msgs *msgs, struct mpp_session *session)
236 {
237 INIT_LIST_HEAD(&msgs->list);
238
239 msgs->session = session;
240 msgs->queue = NULL;
241 msgs->task = NULL;
242 msgs->mpp = NULL;
243
244 msgs->ext_fd = -1;
245
246 task_msgs_reset(msgs);
247 }
248
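/* Take a task_msgs container from the session idle list, or allocate a new one when the idle list is empty. */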
249 static struct mpp_task_msgs *get_task_msgs(struct mpp_session *session)
250 {
251 unsigned long flags;
252 struct mpp_task_msgs *msgs;
253
254 spin_lock_irqsave(&session->lock_msgs, flags);
255 msgs = list_first_entry_or_null(&session->list_msgs_idle,
256 struct mpp_task_msgs, list_session);
257 if (msgs) {
258 list_move_tail(&msgs->list_session, &session->list_msgs);
259 spin_unlock_irqrestore(&session->lock_msgs, flags);
260
261 return msgs;
262 }
263 spin_unlock_irqrestore(&session->lock_msgs, flags);
264
265 msgs = kzalloc(sizeof(*msgs), GFP_KERNEL);
266 task_msgs_init(msgs, session);
267 INIT_LIST_HEAD(&msgs->list_session);
268
269 spin_lock_irqsave(&session->lock_msgs, flags);
270 list_move_tail(&msgs->list_session, &session->list_msgs);
271 session->msgs_cnt++;
272 spin_unlock_irqrestore(&session->lock_msgs, flags);
273
274 mpp_debug_func(DEBUG_TASK_INFO, "session %p:%d msgs cnt %d\n",
275 session, session->index, session->msgs_cnt);
276
277 return msgs;
278 }
279
280 static void put_task_msgs(struct mpp_task_msgs *msgs)
281 {
282 struct mpp_session *session = msgs->session;
283 unsigned long flags;
284
285 if (!session) {
286 pr_err("invalid msgs without session\n");
287 return;
288 }
289
290 if (msgs->ext_fd >= 0) {
291 fdput(msgs->f);
292 msgs->ext_fd = -1;
293 }
294
295 task_msgs_reset(msgs);
296
297 spin_lock_irqsave(&session->lock_msgs, flags);
298 list_move_tail(&msgs->list_session, &session->list_msgs_idle);
299 spin_unlock_irqrestore(&session->lock_msgs, flags);
300 }
301
302 static void clear_task_msgs(struct mpp_session *session)
303 {
304 struct mpp_task_msgs *msgs, *n;
305 LIST_HEAD(list_to_free);
306 unsigned long flags;
307
308 spin_lock_irqsave(&session->lock_msgs, flags);
309
310 list_for_each_entry_safe(msgs, n, &session->list_msgs, list_session)
311 list_move_tail(&msgs->list_session, &list_to_free);
312
313 list_for_each_entry_safe(msgs, n, &session->list_msgs_idle, list_session)
314 list_move_tail(&msgs->list_session, &list_to_free);
315
316 spin_unlock_irqrestore(&session->lock_msgs, flags);
317
318 list_for_each_entry_safe(msgs, n, &list_to_free, list_session)
319 kfree(msgs);
320 }
321
322 static int mpp_session_clear(struct mpp_dev *mpp,
323 struct mpp_session *session)
324 {
325 struct mpp_task *task = NULL, *n;
326
327 /* clear session pending list */
328 mutex_lock(&session->pending_lock);
329 list_for_each_entry_safe(task, n,
330 &session->pending_list,
331 pending_link) {
332 /* abort task in taskqueue */
333 atomic_inc(&task->abort_request);
334 list_del_init(&task->pending_link);
335 kref_put(&task->ref, mpp_free_task);
336 }
337 mutex_unlock(&session->pending_lock);
338
339 return 0;
340 }
341
342 static struct mpp_session *mpp_session_init(void)
343 {
344 struct mpp_session *session = kzalloc(sizeof(*session), GFP_KERNEL);
345
346 if (!session)
347 return NULL;
348
349 session->pid = current->pid;
350
351 mutex_init(&session->pending_lock);
352 INIT_LIST_HEAD(&session->pending_list);
353 INIT_LIST_HEAD(&session->service_link);
354 INIT_LIST_HEAD(&session->session_link);
355
356 atomic_set(&session->task_count, 0);
357 atomic_set(&session->release_request, 0);
358
359 INIT_LIST_HEAD(&session->list_msgs);
360 INIT_LIST_HEAD(&session->list_msgs_idle);
361 spin_lock_init(&session->lock_msgs);
362
363 mpp_dbg_session("session %p init\n", session);
364 return session;
365 }
366
367 static void mpp_session_deinit_default(struct mpp_session *session)
368 {
369 if (session->mpp) {
370 struct mpp_dev *mpp = session->mpp;
371
372 if (mpp->dev_ops->free_session)
373 mpp->dev_ops->free_session(session);
374
375 mpp_session_clear(mpp, session);
376
377 if (session->dma) {
378 mpp_iommu_down_read(mpp->iommu_info);
379 mpp_dma_session_destroy(session->dma);
380 mpp_iommu_up_read(mpp->iommu_info);
381 session->dma = NULL;
382 }
383 }
384
385 if (session->srv) {
386 struct mpp_service *srv = session->srv;
387
388 mutex_lock(&srv->session_lock);
389 list_del_init(&session->service_link);
390 mutex_unlock(&srv->session_lock);
391 }
392
393 list_del_init(&session->session_link);
394 }
395
396 int mpp_session_deinit(struct mpp_session *session)
397 {
398 u32 task_count = atomic_read(&session->task_count);
399
400 mpp_dbg_session("session %p:%d task %d release\n",
401 session, session->index, task_count);
402 if (task_count)
403 return -1;
404
405 if (likely(session->deinit))
406 session->deinit(session);
407 else
408 pr_err("invalid NULL session deinit function\n");
409
410 mpp_dbg_session("session %p:%d deinit\n", session, session->index);
411
412 clear_task_msgs(session);
413
414 kfree(session);
415 return 0;
416 }
417
418 static void mpp_session_attach_workqueue(struct mpp_session *session,
419 struct mpp_taskqueue *queue)
420 {
421 mpp_dbg_session("session %p:%d attach\n", session, session->index);
422 mutex_lock(&queue->session_lock);
423 list_add_tail(&session->session_link, &queue->session_attach);
424 mutex_unlock(&queue->session_lock);
425 }
426
427 static void mpp_session_detach_workqueue(struct mpp_session *session)
428 {
429 struct mpp_taskqueue *queue;
430 struct mpp_dev *mpp;
431
432 if (!session->mpp || !session->mpp->queue)
433 return;
434
435 mpp_dbg_session("session %p:%d detach\n", session, session->index);
436 mpp = session->mpp;
437 queue = mpp->queue;
438
439 mutex_lock(&queue->session_lock);
440 list_del_init(&session->session_link);
441 list_add_tail(&session->session_link, &queue->session_detach);
442 queue->detach_count++;
443 mutex_unlock(&queue->session_lock);
444
445 mpp_taskqueue_trigger_work(mpp);
446 }
447
448 static int
449 mpp_session_push_pending(struct mpp_session *session,
450 struct mpp_task *task)
451 {
452 kref_get(&task->ref);
453 mutex_lock(&session->pending_lock);
454 list_add_tail(&task->pending_link, &session->pending_list);
455 mutex_unlock(&session->pending_lock);
456
457 return 0;
458 }
459
460 static int
461 mpp_session_pop_pending(struct mpp_session *session,
462 struct mpp_task *task)
463 {
464 mutex_lock(&session->pending_lock);
465 list_del_init(&task->pending_link);
466 mutex_unlock(&session->pending_lock);
467 kref_put(&task->ref, mpp_free_task);
468
469 return 0;
470 }
471
472 static struct mpp_task *
473 mpp_session_get_pending_task(struct mpp_session *session)
474 {
475 struct mpp_task *task = NULL;
476
477 mutex_lock(&session->pending_lock);
478 task = list_first_entry_or_null(&session->pending_list,
479 struct mpp_task,
480 pending_link);
481 mutex_unlock(&session->pending_lock);
482
483 return task;
484 }
485
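/* kref release callback: let the device free its task data, then drop the session and device task counters. */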
486 void mpp_free_task(struct kref *ref)
487 {
488 struct mpp_dev *mpp;
489 struct mpp_session *session;
490 struct mpp_task *task = container_of(ref, struct mpp_task, ref);
491
492 if (!task->session) {
493 mpp_err("task %p, task->session is null.\n", task);
494 return;
495 }
496 session = task->session;
497
498 mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d free state 0x%lx abort %d\n",
499 session->index, task->task_id, task->state,
500 atomic_read(&task->abort_request));
501
502 mpp = mpp_get_task_used_device(task, session);
503 if (mpp->dev_ops->free_task)
504 mpp->dev_ops->free_task(session, task);
505
506 /* Drop the task counters of the session and the device */
507 atomic_dec(&session->task_count);
508 atomic_dec(&mpp->task_count);
509 }
510
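/* Delayed work fired when the hardware does not finish a task in time: reset the device, mark the task done and drop it from the running list. */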
511 static void mpp_task_timeout_work(struct work_struct *work_s)
512 {
513 struct mpp_dev *mpp;
514 struct mpp_session *session;
515 struct mpp_task *task = container_of(to_delayed_work(work_s),
516 struct mpp_task,
517 timeout_work);
518
519 if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
520 mpp_err("task has been handled\n");
521 return;
522 }
523
524 mpp_err("task %p processing time out!\n", task);
525 if (!task->session) {
526 mpp_err("task %p, task->session is null.\n", task);
527 return;
528 }
529 session = task->session;
530
531 if (!session->mpp) {
532 mpp_err("session %p, session->mpp is null.\n", session);
533 return;
534 }
535
536 mpp = mpp_get_task_used_device(task, session);
537 /* the hardware may be dead, reset it */
538 mpp_reset_up_read(mpp->reset_group);
539 mpp_dev_reset(mpp);
540 mpp_power_off(mpp);
541
542 set_bit(TASK_STATE_TIMEOUT, &task->state);
543 set_bit(TASK_STATE_DONE, &task->state);
544 /* Wake up the GET thread */
545 wake_up(&task->wait);
546
547 /* remove task from taskqueue running list */
548 mpp_taskqueue_pop_running(mpp->queue, task);
549 }
550
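/* Default task processing: ask the device to build a task from the collected messages and queue it on the session pending list. */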
551 static int mpp_process_task_default(struct mpp_session *session,
552 struct mpp_task_msgs *msgs)
553 {
554 struct mpp_task *task = NULL;
555 struct mpp_dev *mpp = session->mpp;
556
557 if (unlikely(!mpp)) {
558 mpp_err("pid %d client %d found invalid process function\n",
559 session->pid, session->device_type);
560 return -EINVAL;
561 }
562
563 if (mpp->dev_ops->alloc_task)
564 task = mpp->dev_ops->alloc_task(session, msgs);
565 if (!task) {
566 mpp_err("alloc_task failed.\n");
567 return -ENOMEM;
568 }
569 /* ensure current device */
570 mpp = mpp_get_task_used_device(task, session);
571
572 kref_init(&task->ref);
573 init_waitqueue_head(&task->wait);
574 atomic_set(&task->abort_request, 0);
575 task->task_index = atomic_fetch_inc(&mpp->task_index);
576 task->task_id = atomic_fetch_inc(&mpp->queue->task_id);
577 INIT_DELAYED_WORK(&task->timeout_work, mpp_task_timeout_work);
578
579 if (mpp->auto_freq_en && mpp->hw_ops->get_freq)
580 mpp->hw_ops->get_freq(mpp, task);
581
582 msgs->queue = mpp->queue;
583 msgs->task = task;
584 msgs->mpp = mpp;
585
586 /*
587 * The task must be pushed to the session before it is pushed to
588 * the queue. Otherwise, when mpp_task_finish completes and the
589 * worker thread runs the task worker, it may pick up a task that
590 * is already in the queue but not yet in the session and fail.
591 */
592 atomic_inc(&session->task_count);
593 mpp_session_push_pending(session, task);
594
595 return 0;
596 }
597
598 static int mpp_process_task(struct mpp_session *session,
599 struct mpp_task_msgs *msgs)
600 {
601 if (likely(session->process_task))
602 return session->process_task(session, msgs);
603
604 pr_err("invalid NULL process task function\n");
605 return -EINVAL;
606 }
607
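/* Look up a reset control by name, either owned by this device alone or shared through the device reset group. */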
608 struct reset_control *
609 mpp_reset_control_get(struct mpp_dev *mpp, enum MPP_RESET_TYPE type, const char *name)
610 {
611 int index;
612 struct reset_control *rst = NULL;
613 char shared_name[32] = "shared_";
614 struct mpp_reset_group *group;
615
616 /* check whether the reset belongs to this device alone */
617 index = of_property_match_string(mpp->dev->of_node, "reset-names", name);
618 if (index >= 0) {
619 rst = devm_reset_control_get(mpp->dev, name);
620 mpp_safe_unreset(rst);
621
622 return rst;
623 }
624
625 /* check whether the reset is shared */
626 strncat(shared_name, name,
627 sizeof(shared_name) - strlen(shared_name) - 1);
628 index = of_property_match_string(mpp->dev->of_node,
629 "reset-names", shared_name);
630 if (index < 0) {
631 dev_err(mpp->dev, "%s is not found!\n", shared_name);
632 return NULL;
633 }
634
635 if (!mpp->reset_group) {
636 dev_err(mpp->dev, "reset group is empty!\n");
637 return NULL;
638 }
639 group = mpp->reset_group;
640
641 down_write(&group->rw_sem);
642 rst = group->resets[type];
643 if (!rst) {
644 rst = devm_reset_control_get(mpp->dev, shared_name);
645 mpp_safe_unreset(rst);
646 group->resets[type] = rst;
647 group->queue = mpp->queue;
648 }
649 /* If the resets are not in the same queue, different devices may
650 * reset at the same time, so rw_sem_on must be set to true.
651 */
652 group->rw_sem_on |= (group->queue != mpp->queue) ? true : false;
653 dev_info(mpp->dev, "reset_group->rw_sem_on=%d\n", group->rw_sem_on);
654 up_write(&group->rw_sem);
655
656 return rst;
657 }
658
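/* Full device recovery path: reduce the clock rate, run the hardware reset hook and refresh the iommu attachment under the write locks. */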
659 int mpp_dev_reset(struct mpp_dev *mpp)
660 {
661 dev_info(mpp->dev, "resetting...\n");
662
663 /*
664 * Before running, switch the GRF control bit to make sure the
665 * current hardware is selected.
666 */
667 if (mpp->hw_ops->set_grf)
668 mpp->hw_ops->set_grf(mpp);
669 else
670 mpp_set_grf(mpp->grf_info);
671
672 if (mpp->auto_freq_en && mpp->hw_ops->reduce_freq)
673 mpp->hw_ops->reduce_freq(mpp);
674 /* FIXME lock resource lock of the other devices in combo */
675 mpp_iommu_down_write(mpp->iommu_info);
676 mpp_reset_down_write(mpp->reset_group);
677 atomic_set(&mpp->reset_request, 0);
678
679 if (mpp->hw_ops->reset)
680 mpp->hw_ops->reset(mpp);
681
682 /* Note: if the domain does not change, the iommu attach becomes a
683 * no-op. Force a detach followed by a re-attach so the domain is
684 * updated and really gets attached.
685 */
686 mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
687
688 mpp_reset_up_write(mpp->reset_group);
689 mpp_iommu_up_write(mpp->iommu_info);
690
691 dev_info(mpp->dev, "reset done\n");
692
693 return 0;
694 }
695
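/* Hand one prepared task to the hardware: power on, set the clock rate, arm the timeout watchdog and call the device run hook. */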
696 static int mpp_task_run(struct mpp_dev *mpp,
697 struct mpp_task *task)
698 {
699 int ret;
700
701 mpp_debug_enter();
702
703 /*
704 * Before running, switch the GRF control bit to make sure the
705 * current hardware is selected.
706 */
707 if (mpp->hw_ops->set_grf) {
708 ret = mpp->hw_ops->set_grf(mpp);
709 if (ret) {
710 dev_err(mpp->dev, "set grf failed\n");
711 return ret;
712 }
713 } else {
714 mpp_set_grf(mpp->grf_info);
715 }
716
717 mpp_power_on(mpp);
718 mpp_debug_func(DEBUG_TASK_INFO, "pid %d run %s\n",
719 task->session->pid, dev_name(mpp->dev));
720
721 if (mpp->auto_freq_en && mpp->hw_ops->set_freq)
722 mpp->hw_ops->set_freq(mpp, task);
723 /*
724 * TODO: Lock the reader locker of the device resource lock here,
725 * release at the finish operation
726 */
727 mpp_reset_down_read(mpp->reset_group);
728
729 set_bit(TASK_STATE_START, &task->state);
730 mpp_time_record(task);
731 schedule_delayed_work(&task->timeout_work,
732 msecs_to_jiffies(MPP_WORK_TIMEOUT_DELAY));
733 if (mpp->dev_ops->run)
734 mpp->dev_ops->run(mpp, task);
735
736 mpp_debug_leave();
737
738 return 0;
739 }
740
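/* kthread worker: move pending tasks to the running list, start them on the hardware and clean up detached sessions. */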
741 static void mpp_task_worker_default(struct kthread_work *work_s)
742 {
743 struct mpp_task *task;
744 struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
745 struct mpp_taskqueue *queue = mpp->queue;
746
747 mpp_debug_enter();
748
749 again:
750 task = mpp_taskqueue_get_pending_task(queue);
751 if (!task)
752 goto done;
753
754 /* if task timeout and aborted, remove it */
755 if (atomic_read(&task->abort_request) > 0) {
756 mpp_taskqueue_pop_pending(queue, task);
757 goto again;
758 }
759
760 /* get device for current task */
761 mpp = task->session->mpp;
762
763 /*
764 * In link table mode, the device's prepare function checks whether
765 * a new task can be inserted into the device.
766 * If the device supports task status queries (like the HEVC
767 * encoder), it can report whether it is busy.
768 * If the device supports neither multiple tasks nor status queries,
769 * leave this job to the mpp service.
770 */
771 if (mpp->dev_ops->prepare)
772 task = mpp->dev_ops->prepare(mpp, task);
773 else if (mpp_taskqueue_is_running(queue))
774 task = NULL;
775
776 /*
777 * FIXME: even when the hardware supports task queries, the running
778 * list and the mpp service still need to be locked in the current state.
779 */
780 /* Push a pending task to running queue */
781 if (task) {
782 struct mpp_dev *task_mpp = mpp_get_task_used_device(task, task->session);
783
784 mpp_taskqueue_pending_to_run(queue, task);
785 set_bit(TASK_STATE_RUNNING, &task->state);
786 if (mpp_task_run(task_mpp, task))
787 mpp_taskqueue_pop_running(queue, task);
788 else
789 goto again;
790 }
791
792 done:
793 mutex_lock(&queue->session_lock);
794 while (queue->detach_count) {
795 struct mpp_session *session = NULL;
796
797 session = list_first_entry_or_null(&queue->session_detach, struct mpp_session,
798 session_link);
799 if (session) {
800 list_del_init(&session->session_link);
801 queue->detach_count--;
802 }
803
804 mutex_unlock(&queue->session_lock);
805
806 if (session) {
807 mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev),
808 queue->detach_count);
809 mpp_session_deinit(session);
810 }
811
812 mutex_lock(&queue->session_lock);
813 }
814 mutex_unlock(&queue->session_lock);
815 }
816
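/* Default wait path: block until the task is marked done or the wait times out, then collect the result from the device. */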
817 static int mpp_wait_result_default(struct mpp_session *session,
818 struct mpp_task_msgs *msgs)
819 {
820 int ret;
821 struct mpp_task *task;
822 struct mpp_dev *mpp;
823
824 task = mpp_session_get_pending_task(session);
825 if (!task) {
826 mpp_err("session %p pending list is empty!\n", session);
827 return -EIO;
828 }
829 mpp = mpp_get_task_used_device(task, session);
830
831 ret = wait_event_timeout(task->wait,
832 test_bit(TASK_STATE_DONE, &task->state),
833 msecs_to_jiffies(MPP_WAIT_TIMEOUT_DELAY));
834 if (ret > 0) {
835 if (mpp->dev_ops->result)
836 ret = mpp->dev_ops->result(mpp, task, msgs);
837 } else {
838 atomic_inc(&task->abort_request);
839 set_bit(TASK_STATE_ABORT, &task->state);
840 mpp_err("timeout, pid %d session %p:%d count %d cur_task %p id %d\n",
841 session->pid, session, session->index,
842 atomic_read(&session->task_count), task,
843 task->task_id);
844 }
845
846 mpp_debug_func(DEBUG_TASK_INFO, "task %d kref_%d\n",
847 task->task_id, kref_read(&task->ref));
848
849 mpp_session_pop_pending(session, task);
850
851 return ret;
852 }
853
854 static int mpp_wait_result(struct mpp_session *session,
855 struct mpp_task_msgs *msgs)
856 {
857 if (likely(session->wait_result))
858 return session->wait_result(session, msgs);
859
860 pr_err("invalid NULL wait result function\n");
861 return -EINVAL;
862 }
863
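/* Bind the device to the mpp service node from the devicetree and pick its taskqueue and reset group. */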
864 static int mpp_attach_service(struct mpp_dev *mpp, struct device *dev)
865 {
866 u32 taskqueue_node = 0;
867 u32 reset_group_node = 0;
868 struct device_node *np = NULL;
869 struct platform_device *pdev = NULL;
870 struct mpp_taskqueue *queue = NULL;
871 int ret = 0;
872
873 np = of_parse_phandle(dev->of_node, "rockchip,srv", 0);
874 if (!np || !of_device_is_available(np)) {
875 dev_err(dev, "failed to get the mpp service node\n");
876 return -ENODEV;
877 }
878
879 pdev = of_find_device_by_node(np);
880 of_node_put(np);
881 if (!pdev) {
882 dev_err(dev, "failed to get mpp service from node\n");
883 return -ENODEV;
884 }
885
886 mpp->srv = platform_get_drvdata(pdev);
887 platform_device_put(pdev);
888 if (!mpp->srv) {
889 dev_err(dev, "failed attach service\n");
890 return -EINVAL;
891 }
892
893 ret = of_property_read_u32(dev->of_node,
894 "rockchip,taskqueue-node", &taskqueue_node);
895 if (ret) {
896 dev_err(dev, "failed to get taskqueue-node\n");
897 return ret;
898 } else if (taskqueue_node >= mpp->srv->taskqueue_cnt) {
899 dev_err(dev, "taskqueue-node %d must less than %d\n",
900 taskqueue_node, mpp->srv->taskqueue_cnt);
901 return -ENODEV;
902 }
903 /* set taskqueue according to the dtsi */
904 queue = mpp->srv->task_queues[taskqueue_node];
905 if (!queue) {
906 dev_err(dev, "taskqueue attach to invalid node %d\n",
907 taskqueue_node);
908 return -ENODEV;
909 }
910 mpp_attach_workqueue(mpp, queue);
911
912 ret = of_property_read_u32(dev->of_node,
913 "rockchip,resetgroup-node", &reset_group_node);
914 if (!ret) {
915 /* set resetgroup according to the dtsi */
916 if (reset_group_node >= mpp->srv->reset_group_cnt) {
917 dev_err(dev, "resetgroup-node %d must less than %d\n",
918 reset_group_node, mpp->srv->reset_group_cnt);
919 return -ENODEV;
920 } else {
921 mpp->reset_group = mpp->srv->reset_groups[reset_group_node];
922 }
923 }
924
925 return 0;
926 }
927
928 struct mpp_taskqueue *mpp_taskqueue_init(struct device *dev)
929 {
930 struct mpp_taskqueue *queue = devm_kzalloc(dev, sizeof(*queue),
931 GFP_KERNEL);
932 if (!queue)
933 return NULL;
934
935 mutex_init(&queue->session_lock);
936 mutex_init(&queue->pending_lock);
937 spin_lock_init(&queue->running_lock);
938 mutex_init(&queue->mmu_lock);
939 mutex_init(&queue->dev_lock);
940 INIT_LIST_HEAD(&queue->session_attach);
941 INIT_LIST_HEAD(&queue->session_detach);
942 INIT_LIST_HEAD(&queue->pending_list);
943 INIT_LIST_HEAD(&queue->running_list);
944 INIT_LIST_HEAD(&queue->mmu_list);
945 INIT_LIST_HEAD(&queue->dev_list);
946
947 /* default taskqueue has max 16 task capacity */
948 queue->task_capacity = MPP_MAX_TASK_CAPACITY;
949 atomic_set(&queue->reset_request, 0);
950 atomic_set(&queue->task_id, 0);
951
952 return queue;
953 }
954
955 static void mpp_attach_workqueue(struct mpp_dev *mpp,
956 struct mpp_taskqueue *queue)
957 {
958 s32 core_id;
959
960 INIT_LIST_HEAD(&mpp->queue_link);
961
962 mutex_lock(&queue->dev_lock);
963
964 if (mpp->core_id >= 0)
965 core_id = mpp->core_id;
966 else
967 core_id = queue->core_count;
968
969 if (core_id < 0 || core_id >= MPP_MAX_CORE_NUM) {
970 dev_err(mpp->dev, "invalid core id %d\n", core_id);
971 goto done;
972 }
973
974 if (queue->cores[core_id]) {
975 dev_err(mpp->dev, "can not attach device with same id %d", core_id);
976 goto done;
977 }
978
979 queue->cores[core_id] = mpp;
980 queue->core_count++;
981
982 set_bit(core_id, &queue->core_idle);
983 list_add_tail(&mpp->queue_link, &queue->dev_list);
984
985 mpp->core_id = core_id;
986 mpp->queue = queue;
987
988 mpp_dbg_core("%s attach queue as core %d\n",
989 dev_name(mpp->dev), mpp->core_id);
990
991 if (queue->task_capacity > mpp->task_capacity)
992 queue->task_capacity = mpp->task_capacity;
993
994 done:
995 mutex_unlock(&queue->dev_lock);
996 }
997
998 static void mpp_detach_workqueue(struct mpp_dev *mpp)
999 {
1000 struct mpp_taskqueue *queue = mpp->queue;
1001
1002 if (queue) {
1003 mutex_lock(&queue->dev_lock);
1004
1005 queue->cores[mpp->core_id] = NULL;
1006 queue->core_count--;
1007
1008 clear_bit(queue->core_count, &queue->core_idle);
1009 list_del_init(&mpp->queue_link);
1010
1011 mpp->queue = NULL;
1012
1013 mutex_unlock(&queue->dev_lock);
1014 }
1015 }
1016
1017 static int mpp_check_cmd_v1(__u32 cmd)
1018 {
1019 bool found;
1020
1021 found = (cmd < MPP_CMD_QUERY_BUTT) ? true : false;
1022 found = (cmd >= MPP_CMD_INIT_BASE && cmd < MPP_CMD_INIT_BUTT) ? true : found;
1023 found = (cmd >= MPP_CMD_SEND_BASE && cmd < MPP_CMD_SEND_BUTT) ? true : found;
1024 found = (cmd >= MPP_CMD_POLL_BASE && cmd < MPP_CMD_POLL_BUTT) ? true : found;
1025 found = (cmd >= MPP_CMD_CONTROL_BASE && cmd < MPP_CMD_CONTROL_BUTT) ? true : found;
1026
1027 return found ? 0 : -EINVAL;
1028 }
1029
1030 static inline int mpp_msg_is_last(struct mpp_request *req)
1031 {
1032 int flag;
1033
1034 if (req->flags & MPP_FLAGS_MULTI_MSG)
1035 flag = (req->flags & MPP_FLAGS_LAST_MSG) ? 1 : 0;
1036 else
1037 flag = 1;
1038
1039 return flag;
1040 }
1041
1042 static __u32 mpp_get_cmd_butt(__u32 cmd)
1043 {
1044 __u32 mask = 0;
1045
1046 switch (cmd) {
1047 case MPP_CMD_QUERY_BASE:
1048 mask = MPP_CMD_QUERY_BUTT;
1049 break;
1050 case MPP_CMD_INIT_BASE:
1051 mask = MPP_CMD_INIT_BUTT;
1052 break;
1053
1054 case MPP_CMD_SEND_BASE:
1055 mask = MPP_CMD_SEND_BUTT;
1056 break;
1057 case MPP_CMD_POLL_BASE:
1058 mask = MPP_CMD_POLL_BUTT;
1059 break;
1060 case MPP_CMD_CONTROL_BASE:
1061 mask = MPP_CMD_CONTROL_BUTT;
1062 break;
1063 default:
1064 mpp_err("unknown dev cmd 0x%x\n", cmd);
1065 break;
1066 }
1067
1068 return mask;
1069 }
1070
1071 static int mpp_process_request(struct mpp_session *session,
1072 struct mpp_service *srv,
1073 struct mpp_request *req,
1074 struct mpp_task_msgs *msgs)
1075 {
1076 int ret;
1077 struct mpp_dev *mpp;
1078
1079 mpp_debug(DEBUG_IOCTL, "cmd %x process\n", req->cmd);
1080
1081 switch (req->cmd) {
1082 case MPP_CMD_QUERY_HW_SUPPORT: {
1083 u32 hw_support = srv->hw_support;
1084
1085 mpp_debug(DEBUG_IOCTL, "hw_support %08x\n", hw_support);
1086 if (put_user(hw_support, (u32 __user *)req->data))
1087 return -EFAULT;
1088 } break;
1089 case MPP_CMD_QUERY_HW_ID: {
1090 struct mpp_hw_info *hw_info;
1091
1092 mpp = NULL;
1093 if (session && session->mpp) {
1094 mpp = session->mpp;
1095 } else {
1096 u32 client_type;
1097
1098 if (get_user(client_type, (u32 __user *)req->data))
1099 return -EFAULT;
1100
1101 mpp_debug(DEBUG_IOCTL, "client %d\n", client_type);
1102 client_type = array_index_nospec(client_type, MPP_DEVICE_BUTT);
1103 if (test_bit(client_type, &srv->hw_support))
1104 mpp = srv->sub_devices[client_type];
1105 }
1106
1107 if (!mpp)
1108 return -EINVAL;
1109
1110 hw_info = mpp->var->hw_info;
1111 mpp_debug(DEBUG_IOCTL, "hw_id %08x\n", hw_info->hw_id);
1112 if (put_user(hw_info->hw_id, (u32 __user *)req->data))
1113 return -EFAULT;
1114 } break;
1115 case MPP_CMD_QUERY_CMD_SUPPORT: {
1116 __u32 cmd = 0;
1117
1118 if (get_user(cmd, (u32 __user *)req->data))
1119 return -EINVAL;
1120
1121 if (put_user(mpp_get_cmd_butt(cmd), (u32 __user *)req->data))
1122 return -EFAULT;
1123 } break;
1124 case MPP_CMD_INIT_CLIENT_TYPE: {
1125 u32 client_type;
1126
1127 if (get_user(client_type, (u32 __user *)req->data))
1128 return -EFAULT;
1129
1130 mpp_debug(DEBUG_IOCTL, "client %d\n", client_type);
1131 if (client_type >= MPP_DEVICE_BUTT) {
1132 mpp_err("client_type must less than %d\n",
1133 MPP_DEVICE_BUTT);
1134 return -EINVAL;
1135 }
1136 client_type = array_index_nospec(client_type, MPP_DEVICE_BUTT);
1137 mpp = srv->sub_devices[client_type];
1138 if (!mpp)
1139 return -EINVAL;
1140
1141 session->device_type = (enum MPP_DEVICE_TYPE)client_type;
1142 session->dma = mpp_dma_session_create(mpp->dev, mpp->session_max_buffers);
1143 session->mpp = mpp;
1144 if (mpp->dev_ops) {
1145 if (mpp->dev_ops->process_task)
1146 session->process_task =
1147 mpp->dev_ops->process_task;
1148
1149 if (mpp->dev_ops->wait_result)
1150 session->wait_result =
1151 mpp->dev_ops->wait_result;
1152
1153 if (mpp->dev_ops->deinit)
1154 session->deinit = mpp->dev_ops->deinit;
1155 }
1156 session->index = atomic_fetch_inc(&mpp->session_index);
1157 if (mpp->dev_ops && mpp->dev_ops->init_session) {
1158 ret = mpp->dev_ops->init_session(session);
1159 if (ret)
1160 return ret;
1161 }
1162
1163 mpp_session_attach_workqueue(session, mpp->queue);
1164 } break;
1165 case MPP_CMD_INIT_DRIVER_DATA: {
1166 u32 val;
1167
1168 mpp = session->mpp;
1169 if (!mpp)
1170 return -EINVAL;
1171 if (get_user(val, (u32 __user *)req->data))
1172 return -EFAULT;
1173 if (mpp->grf_info->grf)
1174 regmap_write(mpp->grf_info->grf, 0x5d8, val);
1175 } break;
1176 case MPP_CMD_INIT_TRANS_TABLE: {
1177 if (session && req->size) {
1178 int trans_tbl_size = sizeof(session->trans_table);
1179
1180 if (req->size > trans_tbl_size) {
1181 mpp_err("init table size %d more than %d\n",
1182 req->size, trans_tbl_size);
1183 return -ENOMEM;
1184 }
1185
1186 if (copy_from_user(session->trans_table,
1187 req->data, req->size)) {
1188 mpp_err("copy_from_user failed\n");
1189 return -EINVAL;
1190 }
1191 session->trans_count =
1192 req->size / sizeof(session->trans_table[0]);
1193 }
1194 } break;
1195 case MPP_CMD_SET_REG_WRITE:
1196 case MPP_CMD_SET_REG_READ:
1197 case MPP_CMD_SET_REG_ADDR_OFFSET:
1198 case MPP_CMD_SET_RCB_INFO: {
1199 msgs->flags |= req->flags;
1200 msgs->set_cnt++;
1201 } break;
1202 case MPP_CMD_POLL_HW_FINISH: {
1203 msgs->flags |= req->flags;
1204 msgs->poll_cnt++;
1205 msgs->poll_req = NULL;
1206 } break;
1207 case MPP_CMD_POLL_HW_IRQ: {
1208 if (msgs->poll_cnt || msgs->poll_req)
1209 mpp_err("Do NOT poll hw irq when previous call not return\n");
1210
1211 msgs->flags |= req->flags;
1212 msgs->poll_cnt++;
1213
1214 if (req->size && req->data) {
1215 if (!msgs->poll_req)
1216 msgs->poll_req = req;
1217 } else {
1218 msgs->poll_req = NULL;
1219 }
1220 } break;
1221 case MPP_CMD_RESET_SESSION: {
1222 int ret;
1223 int val;
1224
1225 ret = readx_poll_timeout(atomic_read,
1226 &session->task_count,
1227 val, val == 0, 1000, 500000);
1228 if (ret == -ETIMEDOUT) {
1229 mpp_err("wait task running time out\n");
1230 } else {
1231 mpp = session->mpp;
1232 if (!mpp)
1233 return -EINVAL;
1234
1235 mpp_session_clear(mpp, session);
1236 mpp_iommu_down_write(mpp->iommu_info);
1237 ret = mpp_dma_session_destroy(session->dma);
1238 mpp_iommu_up_write(mpp->iommu_info);
1239 }
1240 return ret;
1241 } break;
1242 case MPP_CMD_TRANS_FD_TO_IOVA: {
1243 u32 i;
1244 u32 count;
1245 u32 data[MPP_MAX_REG_TRANS_NUM];
1246
1247 mpp = session->mpp;
1248 if (!mpp)
1249 return -EINVAL;
1250
1251 if (req->size <= 0 ||
1252 req->size > sizeof(data))
1253 return -EINVAL;
1254
1255 memset(data, 0, sizeof(data));
1256 if (copy_from_user(data, req->data, req->size)) {
1257 mpp_err("copy_from_user failed.\n");
1258 return -EINVAL;
1259 }
1260 count = req->size / sizeof(u32);
1261 for (i = 0; i < count; i++) {
1262 struct mpp_dma_buffer *buffer;
1263 int fd = data[i];
1264
1265 mpp_iommu_down_read(mpp->iommu_info);
1266 buffer = mpp_dma_import_fd(mpp->iommu_info,
1267 session->dma, fd);
1268 mpp_iommu_up_read(mpp->iommu_info);
1269 if (IS_ERR_OR_NULL(buffer)) {
1270 mpp_err("can not import fd %d\n", fd);
1271 return -EINVAL;
1272 }
1273 data[i] = (u32)buffer->iova;
1274 mpp_debug(DEBUG_IOMMU, "fd %d => iova %08x\n",
1275 fd, data[i]);
1276 }
1277 if (copy_to_user(req->data, data, req->size)) {
1278 mpp_err("copy_to_user failed.\n");
1279 return -EINVAL;
1280 }
1281 } break;
1282 case MPP_CMD_RELEASE_FD: {
1283 u32 i;
1284 int ret;
1285 u32 count;
1286 u32 data[MPP_MAX_REG_TRANS_NUM];
1287
1288 if (req->size <= 0 ||
1289 req->size > sizeof(data))
1290 return -EINVAL;
1291
1292 memset(data, 0, sizeof(data));
1293 if (copy_from_user(data, req->data, req->size)) {
1294 mpp_err("copy_from_user failed.\n");
1295 return -EINVAL;
1296 }
1297 count = req->size / sizeof(u32);
1298 for (i = 0; i < count; i++) {
1299 ret = mpp_dma_release_fd(session->dma, data[i]);
1300 if (ret) {
1301 mpp_err("release fd %d failed.\n", data[i]);
1302 return ret;
1303 }
1304 }
1305 } break;
1306 default: {
1307 mpp = session->mpp;
1308 if (!mpp) {
1309 mpp_err("pid %d not find client %d\n",
1310 session->pid, session->device_type);
1311 return -EINVAL;
1312 }
1313 if (mpp->dev_ops->ioctl)
1314 return mpp->dev_ops->ioctl(session, req);
1315
1316 mpp_debug(DEBUG_IOCTL, "unknown mpp ioctl cmd %x\n", req->cmd);
1317 } break;
1318 }
1319
1320 return 0;
1321 }
1322
1323 static void task_msgs_add(struct mpp_task_msgs *msgs, struct list_head *head)
1324 {
1325 struct mpp_session *session = msgs->session;
1326 int ret = 0;
1327
1328 /* process each task */
1329 if (msgs->set_cnt) {
1330 /* NOTE: update msg_flags for fd over 1024 */
1331 session->msg_flags = msgs->flags;
1332 ret = mpp_process_task(session, msgs);
1333 }
1334
1335 if (!ret) {
1336 INIT_LIST_HEAD(&msgs->list);
1337 list_add_tail(&msgs->list, head);
1338 } else {
1339 put_task_msgs(msgs);
1340 }
1341 }
1342
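/* Parse the ioctl message stream from userspace into task_msgs containers, handling session switches for batch mode. */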
1343 static int mpp_collect_msgs(struct list_head *head, struct mpp_session *session,
1344 unsigned int cmd, void __user *msg)
1345 {
1346 struct mpp_msg_v1 msg_v1;
1347 struct mpp_request *req;
1348 struct mpp_task_msgs *msgs = NULL;
1349 int last = 1;
1350 int ret;
1351
1352 if (cmd != MPP_IOC_CFG_V1) {
1353 mpp_err("unknown ioctl cmd %x\n", cmd);
1354 return -EINVAL;
1355 }
1356
1357 next:
1358 /* first, parse to fixed struct */
1359 if (copy_from_user(&msg_v1, msg, sizeof(msg_v1)))
1360 return -EFAULT;
1361
1362 msg += sizeof(msg_v1);
1363
1364 mpp_debug(DEBUG_IOCTL, "cmd %x collect flags %08x, size %d, offset %x\n",
1365 msg_v1.cmd, msg_v1.flags, msg_v1.size, msg_v1.offset);
1366
1367 if (mpp_check_cmd_v1(msg_v1.cmd)) {
1368 mpp_err("mpp cmd %x is not supported.\n", msg_v1.cmd);
1369 return -EFAULT;
1370 }
1371
1372 if (msg_v1.flags & MPP_FLAGS_MULTI_MSG)
1373 last = (msg_v1.flags & MPP_FLAGS_LAST_MSG) ? 1 : 0;
1374 else
1375 last = 1;
1376
1377 /* check for the command that switches the msgs session */
1378 if (msg_v1.cmd == MPP_CMD_SET_SESSION_FD) {
1379 struct mpp_bat_msg bat_msg;
1380 struct mpp_bat_msg __user *usr_cmd;
1381 struct fd f;
1382
1383 /* try session switch here */
1384 usr_cmd = (struct mpp_bat_msg __user *)(unsigned long)msg_v1.data_ptr;
1385
1386 if (copy_from_user(&bat_msg, usr_cmd, sizeof(bat_msg)))
1387 return -EFAULT;
1388
1389 /* skip finished message */
1390 if (bat_msg.flag & MPP_BAT_MSG_DONE)
1391 goto session_switch_done;
1392
1393 f = fdget(bat_msg.fd);
1394 if (!f.file) {
1395 int ret = -EBADF;
1396
1397 mpp_err("fd %d get session failed\n", bat_msg.fd);
1398
1399 if (copy_to_user(&usr_cmd->ret, &ret, sizeof(usr_cmd->ret)))
1400 mpp_err("copy_to_user failed.\n");
1401 goto session_switch_done;
1402 }
1403
1404 /* NOTE: add previous ready task to queue and drop empty task */
1405 if (msgs) {
1406 if (msgs->req_cnt)
1407 task_msgs_add(msgs, head);
1408 else
1409 put_task_msgs(msgs);
1410
1411 msgs = NULL;
1412 }
1413
1414 /* switch session */
1415 session = f.file->private_data;
1416 msgs = get_task_msgs(session);
1417
1418 if (f.file->private_data == session)
1419 msgs->ext_fd = bat_msg.fd;
1420
1421 msgs->f = f;
1422
1423 mpp_debug(DEBUG_IOCTL, "fd %d, session %d msg_cnt %d\n",
1424 bat_msg.fd, session->index, session->msgs_cnt);
1425
1426 session_switch_done:
1427 /* session id should NOT be the last message */
1428 if (last)
1429 return 0;
1430
1431 goto next;
1432 }
1433
1434 if (!msgs)
1435 msgs = get_task_msgs(session);
1436
1437 if (!msgs) {
1438 pr_err("session %p:%d failed to get task msgs",
1439 session, session->index);
1440 return -EINVAL;
1441 }
1442
1443 if (msgs->req_cnt >= MPP_MAX_MSG_NUM) {
1444 mpp_err("session %d message count %d more than %d.\n",
1445 session->index, msgs->req_cnt, MPP_MAX_MSG_NUM);
1446 return -EINVAL;
1447 }
1448
1449 req = &msgs->reqs[msgs->req_cnt++];
1450 req->cmd = msg_v1.cmd;
1451 req->flags = msg_v1.flags;
1452 req->size = msg_v1.size;
1453 req->offset = msg_v1.offset;
1454 req->data = (void __user *)(unsigned long)msg_v1.data_ptr;
1455
1456 ret = mpp_process_request(session, session->srv, req, msgs);
1457 if (ret) {
1458 mpp_err("session %d process cmd %x ret %d\n",
1459 session->index, req->cmd, ret);
1460 return ret;
1461 }
1462
1463 if (!last)
1464 goto next;
1465
1466 task_msgs_add(msgs, head);
1467 msgs = NULL;
1468
1469 return 0;
1470 }
1471
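/* Push the collected tasks onto their taskqueue pending lists and kick the corresponding workers. */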
1472 static void mpp_msgs_trigger(struct list_head *msgs_list)
1473 {
1474 struct mpp_task_msgs *msgs, *n;
1475 struct mpp_dev *mpp_prev = NULL;
1476 struct mpp_taskqueue *queue_prev = NULL;
1477
1478 /* push task to queue */
1479 list_for_each_entry_safe(msgs, n, msgs_list, list) {
1480 struct mpp_dev *mpp;
1481 struct mpp_task *task;
1482 struct mpp_taskqueue *queue;
1483
1484 if (!msgs->set_cnt || !msgs->queue)
1485 continue;
1486
1487 mpp = msgs->mpp;
1488 task = msgs->task;
1489 queue = msgs->queue;
1490
1491 if (queue_prev != queue) {
1492 if (queue_prev && mpp_prev) {
1493 mutex_unlock(&queue_prev->pending_lock);
1494 mpp_taskqueue_trigger_work(mpp_prev);
1495 }
1496
1497 if (queue)
1498 mutex_lock(&queue->pending_lock);
1499
1500 mpp_prev = mpp;
1501 queue_prev = queue;
1502 }
1503
1504 if (test_bit(TASK_STATE_ABORT, &task->state))
1505 pr_info("try to trigger abort task %d\n", task->task_id);
1506
1507 atomic_inc(&mpp->task_count);
1508
1509 set_bit(TASK_STATE_PENDING, &task->state);
1510 list_add_tail(&task->queue_link, &queue->pending_list);
1511 }
1512
1513 if (mpp_prev && queue_prev) {
1514 mutex_unlock(&queue_prev->pending_lock);
1515 mpp_taskqueue_trigger_work(mpp_prev);
1516 }
1517 }
1518
1519 static void mpp_msgs_wait(struct list_head *msgs_list)
1520 {
1521 struct mpp_task_msgs *msgs, *n;
1522
1523 /* poll and release each task */
1524 list_for_each_entry_safe(msgs, n, msgs_list, list) {
1525 struct mpp_session *session = msgs->session;
1526
1527 if (msgs->poll_cnt) {
1528 int ret = mpp_wait_result(session, msgs);
1529
1530 if (ret) {
1531 mpp_err("session %d wait result ret %d\n",
1532 session->index, ret);
1533 }
1534 }
1535
1536 put_task_msgs(msgs);
1537
1538 }
1539 }
1540
1541 static long mpp_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1542 {
1543 struct mpp_service *srv;
1544 struct mpp_session *session = (struct mpp_session *)filp->private_data;
1545 struct list_head msgs_list;
1546 int ret = 0;
1547
1548 mpp_debug_enter();
1549
1550 if (!session || !session->srv) {
1551 mpp_err("session %p\n", session);
1552 return -EINVAL;
1553 }
1554
1555 srv = session->srv;
1556
1557 if (atomic_read(&session->release_request) > 0) {
1558 mpp_debug(DEBUG_IOCTL, "release session had request\n");
1559 return -EBUSY;
1560 }
1561 if (atomic_read(&srv->shutdown_request) > 0) {
1562 mpp_debug(DEBUG_IOCTL, "shutdown had request\n");
1563 return -EBUSY;
1564 }
1565
1566 INIT_LIST_HEAD(&msgs_list);
1567
1568 ret = mpp_collect_msgs(&msgs_list, session, cmd, (void __user *)arg);
1569 if (ret)
1570 mpp_err("collect msgs failed %d\n", ret);
1571
1572 mpp_msgs_trigger(&msgs_list);
1573
1574 mpp_msgs_wait(&msgs_list);
1575
1576 mpp_debug_leave();
1577
1578 return ret;
1579 }
1580
1581 static int mpp_dev_open(struct inode *inode, struct file *filp)
1582 {
1583 struct mpp_session *session = NULL;
1584 struct mpp_service *srv = container_of(inode->i_cdev,
1585 struct mpp_service,
1586 mpp_cdev);
1587 mpp_debug_enter();
1588
1589 session = mpp_session_init();
1590 if (!session)
1591 return -ENOMEM;
1592
1593 session->srv = srv;
1594
1595 if (session->srv) {
1596 mutex_lock(&srv->session_lock);
1597 list_add_tail(&session->service_link, &srv->session_list);
1598 mutex_unlock(&srv->session_lock);
1599 }
1600 session->process_task = mpp_process_task_default;
1601 session->wait_result = mpp_wait_result_default;
1602 session->deinit = mpp_session_deinit_default;
1603 filp->private_data = (void *)session;
1604
1605 mpp_debug_leave();
1606
1607 return nonseekable_open(inode, filp);
1608 }
1609
1610 static int mpp_dev_release(struct inode *inode, struct file *filp)
1611 {
1612 struct mpp_session *session = filp->private_data;
1613
1614 mpp_debug_enter();
1615
1616 if (!session) {
1617 mpp_err("session is null\n");
1618 return -EINVAL;
1619 }
1620
1621 /* wait for task all done */
1622 atomic_inc(&session->release_request);
1623
1624 if (session->mpp)
1625 mpp_session_detach_workqueue(session);
1626 else
1627 mpp_session_deinit(session);
1628
1629 filp->private_data = NULL;
1630
1631 mpp_debug_leave();
1632 return 0;
1633 }
1634
1635 const struct file_operations rockchip_mpp_fops = {
1636 .open = mpp_dev_open,
1637 .release = mpp_dev_release,
1638 .unlocked_ioctl = mpp_dev_ioctl,
1639 #ifdef CONFIG_COMPAT
1640 .compat_ioctl = mpp_dev_ioctl,
1641 #endif
1642 };
1643
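/* Import a dma-buf fd into the task, reusing an already imported buffer with the same fd when possible. */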
1644 struct mpp_mem_region *
1645 mpp_task_attach_fd(struct mpp_task *task, int fd)
1646 {
1647 struct mpp_mem_region *mem_region = NULL, *loop = NULL, *n;
1648 struct mpp_dma_buffer *buffer = NULL;
1649 struct mpp_dev *mpp = task->session->mpp;
1650 struct mpp_dma_session *dma = task->session->dma;
1651 u32 mem_num = ARRAY_SIZE(task->mem_regions);
1652 bool found = false;
1653
1654 if (fd <= 0 || !dma || !mpp)
1655 return ERR_PTR(-EINVAL);
1656
1657 if (task->mem_count >= mem_num) {
1658 mpp_err("mem_count %d must less than %d\n", task->mem_count, mem_num);
1659 return ERR_PTR(-ENOMEM);
1660 }
1661
1662 /* check whether the fd has already been imported */
1663 list_for_each_entry_safe_reverse(loop, n, &task->mem_region_list, reg_link) {
1664 if (loop->fd == fd) {
1665 found = true;
1666 break;
1667 }
1668 }
1669
1670 mem_region = &task->mem_regions[task->mem_count];
1671 if (found) {
1672 memcpy(mem_region, loop, sizeof(*loop));
1673 mem_region->is_dup = true;
1674 } else {
1675 mpp_iommu_down_read(mpp->iommu_info);
1676 buffer = mpp_dma_import_fd(mpp->iommu_info, dma, fd);
1677 mpp_iommu_up_read(mpp->iommu_info);
1678 if (IS_ERR_OR_NULL(buffer)) {
1679 mpp_err("can't import dma-buf %d\n", fd);
1680 return ERR_PTR(-ENOMEM);
1681 }
1682
1683 mem_region->hdl = buffer;
1684 mem_region->iova = buffer->iova;
1685 mem_region->len = buffer->size;
1686 mem_region->fd = fd;
1687 mem_region->is_dup = false;
1688 }
1689 task->mem_count++;
1690 INIT_LIST_HEAD(&mem_region->reg_link);
1691 list_add_tail(&mem_region->reg_link, &task->mem_region_list);
1692
1693 return mem_region;
1694 }
1695
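/* Walk the format's translation table and replace fd(+offset) register values with device iova addresses. */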
1696 int mpp_translate_reg_address(struct mpp_session *session,
1697 struct mpp_task *task, int fmt,
1698 u32 *reg, struct reg_offset_info *off_inf)
1699 {
1700 int i;
1701 int cnt;
1702 const u16 *tbl;
1703
1704 mpp_debug_enter();
1705
1706 if (session->trans_count > 0) {
1707 cnt = session->trans_count;
1708 tbl = session->trans_table;
1709 } else {
1710 struct mpp_dev *mpp = mpp_get_task_used_device(task, session);
1711 struct mpp_trans_info *trans_info = mpp->var->trans_info;
1712
1713 cnt = trans_info[fmt].count;
1714 tbl = trans_info[fmt].table;
1715 }
1716
1717 for (i = 0; i < cnt; i++) {
1718 int usr_fd;
1719 u32 offset;
1720 struct mpp_mem_region *mem_region = NULL;
1721
1722 if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
1723 usr_fd = reg[tbl[i]];
1724 offset = 0;
1725 } else {
1726 usr_fd = reg[tbl[i]] & 0x3ff;
1727 offset = reg[tbl[i]] >> 10;
1728 }
1729
1730 if (usr_fd == 0)
1731 continue;
1732
1733 mem_region = mpp_task_attach_fd(task, usr_fd);
1734 if (IS_ERR(mem_region)) {
1735 mpp_err("reg[%3d]: 0x%08x fd %d failed\n",
1736 tbl[i], reg[tbl[i]], usr_fd);
1737 return PTR_ERR(mem_region);
1738 }
1739 mpp_debug(DEBUG_IOMMU,
1740 "reg[%3d]: %d => %pad, offset %10d, size %lx\n",
1741 tbl[i], usr_fd, &mem_region->iova,
1742 offset, mem_region->len);
1743 mem_region->reg_idx = tbl[i];
1744 reg[tbl[i]] = mem_region->iova + offset;
1745 }
1746
1747 mpp_debug_leave();
1748
1749 return 0;
1750 }
1751
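/* Validate that a register read/write request stays inside the allowed register window. */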
1752 int mpp_check_req(struct mpp_request *req, int base,
1753 int max_size, u32 off_s, u32 off_e)
1754 {
1755 int req_off;
1756
1757 if (req->offset < base) {
1758 mpp_err("error: base %x, offset %x\n",
1759 base, req->offset);
1760 return -EINVAL;
1761 }
1762 req_off = req->offset - base;
1763 if ((req_off + req->size) < off_s) {
1764 mpp_err("error: req_off %x, req_size %x, off_s %x\n",
1765 req_off, req->size, off_s);
1766 return -EINVAL;
1767 }
1768 if (max_size < off_e) {
1769 mpp_err("error: off_e %x, max_size %x\n",
1770 off_e, max_size);
1771 return -EINVAL;
1772 }
1773 if (req_off > max_size) {
1774 mpp_err("error: req_off %x, max_size %x\n",
1775 req_off, max_size);
1776 return -EINVAL;
1777 }
1778 if ((req_off + req->size) > max_size) {
1779 mpp_err("error: req_off %x, req_size %x, max_size %x\n",
1780 req_off, req->size, max_size);
1781 req->size = req_off + req->size - max_size;
1782 }
1783
1784 return 0;
1785 }
1786
1787 int mpp_extract_reg_offset_info(struct reg_offset_info *off_inf,
1788 struct mpp_request *req)
1789 {
1790 int max_size = ARRAY_SIZE(off_inf->elem);
1791 int cnt = req->size / sizeof(off_inf->elem[0]);
1792
1793 if ((cnt + off_inf->cnt) > max_size) {
1794 mpp_err("count %d, total %d, max_size %d\n",
1795 cnt, off_inf->cnt, max_size);
1796 return -EINVAL;
1797 }
1798 if (copy_from_user(&off_inf->elem[off_inf->cnt],
1799 req->data, req->size)) {
1800 mpp_err("copy_from_user failed\n");
1801 return -EINVAL;
1802 }
1803 off_inf->cnt += cnt;
1804
1805 return 0;
1806 }
1807
1808 int mpp_query_reg_offset_info(struct reg_offset_info *off_inf,
1809 u32 index)
1810 {
1811 mpp_debug_enter();
1812 if (off_inf) {
1813 int i;
1814
1815 for (i = 0; i < off_inf->cnt; i++) {
1816 if (off_inf->elem[i].index == index)
1817 return off_inf->elem[i].offset;
1818 }
1819 }
1820 mpp_debug_leave();
1821
1822 return 0;
1823 }
1824
1825 int mpp_translate_reg_offset_info(struct mpp_task *task,
1826 struct reg_offset_info *off_inf,
1827 u32 *reg)
1828 {
1829 mpp_debug_enter();
1830
1831 if (off_inf) {
1832 int i;
1833
1834 for (i = 0; i < off_inf->cnt; i++) {
1835 mpp_debug(DEBUG_IOMMU, "reg[%d] + offset %d\n",
1836 off_inf->elem[i].index,
1837 off_inf->elem[i].offset);
1838 reg[off_inf->elem[i].index] += off_inf->elem[i].offset;
1839 }
1840 }
1841 mpp_debug_leave();
1842
1843 return 0;
1844 }
1845
1846 int mpp_task_init(struct mpp_session *session, struct mpp_task *task)
1847 {
1848 INIT_LIST_HEAD(&task->pending_link);
1849 INIT_LIST_HEAD(&task->queue_link);
1850 INIT_LIST_HEAD(&task->mem_region_list);
1851 task->state = 0;
1852 task->mem_count = 0;
1853 task->session = session;
1854
1855 return 0;
1856 }
1857
1858 int mpp_task_finish(struct mpp_session *session,
1859 struct mpp_task *task)
1860 {
1861 struct mpp_dev *mpp = mpp_get_task_used_device(task, session);
1862
1863 if (mpp->dev_ops->finish)
1864 mpp->dev_ops->finish(mpp, task);
1865
1866 mpp_reset_up_read(mpp->reset_group);
1867 if (atomic_read(&mpp->reset_request) > 0)
1868 mpp_dev_reset(mpp);
1869 mpp_power_off(mpp);
1870
1871 set_bit(TASK_STATE_FINISH, &task->state);
1872 set_bit(TASK_STATE_DONE, &task->state);
1873 /* Wake up the GET thread */
1874 wake_up(&task->wait);
1875 mpp_taskqueue_pop_running(mpp->queue, task);
1876
1877 return 0;
1878 }
1879
1880 int mpp_task_finalize(struct mpp_session *session,
1881 struct mpp_task *task)
1882 {
1883 struct mpp_mem_region *mem_region = NULL, *n;
1884 struct mpp_dev *mpp = mpp_get_task_used_device(task, session);
1885
1886 /* release the memory regions attached to this register table */
1887 list_for_each_entry_safe(mem_region, n,
1888 &task->mem_region_list,
1889 reg_link) {
1890 if (!mem_region->is_dup) {
1891 mpp_iommu_down_read(mpp->iommu_info);
1892 mpp_dma_release(session->dma, mem_region->hdl);
1893 mpp_iommu_up_read(mpp->iommu_info);
1894 }
1895 list_del_init(&mem_region->reg_link);
1896 }
1897
1898 return 0;
1899 }
1900
1901 int mpp_task_dump_mem_region(struct mpp_dev *mpp,
1902 struct mpp_task *task)
1903 {
1904 struct mpp_mem_region *mem = NULL, *n;
1905
1906 if (!task)
1907 return -EIO;
1908
1909 mpp_err("--- dump mem region ---\n");
1910 if (!list_empty(&task->mem_region_list)) {
1911 list_for_each_entry_safe(mem, n,
1912 &task->mem_region_list,
1913 reg_link) {
1914 mpp_err("reg[%3d]: %pad, size %lx\n",
1915 mem->reg_idx, &mem->iova, mem->len);
1916 }
1917 } else {
1918 dev_err(mpp->dev, "no memory region mapped\n");
1919 }
1920
1921 return 0;
1922 }
1923
1924 int mpp_task_dump_reg(struct mpp_dev *mpp,
1925 struct mpp_task *task)
1926 {
1927 if (!task)
1928 return -EIO;
1929
1930 if (mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) {
1931 mpp_err("--- dump task register ---\n");
1932 if (task->reg) {
1933 u32 i;
1934 u32 s = task->hw_info->reg_start;
1935 u32 e = task->hw_info->reg_end;
1936
1937 for (i = s; i <= e; i++) {
1938 u32 reg = i * sizeof(u32);
1939
1940 mpp_err("reg[%03d]: %04x: 0x%08x\n",
1941 i, reg, task->reg[i]);
1942 }
1943 }
1944 }
1945
1946 return 0;
1947 }
1948
1949 int mpp_task_dump_hw_reg(struct mpp_dev *mpp)
1950 {
1951 if (mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) {
1952 u32 i;
1953 u32 s = mpp->var->hw_info->reg_start;
1954 u32 e = mpp->var->hw_info->reg_end;
1955
1956 mpp_err("--- dump hardware register ---\n");
1957 for (i = s; i <= e; i++) {
1958 u32 reg = i * sizeof(u32);
1959
1960 mpp_err("reg[%03d]: %04x: 0x%08x\n",
1961 i, reg, readl_relaxed(mpp->reg_base + reg));
1962 }
1963 }
1964
1965 return 0;
1966 }
1967
1968 static int mpp_iommu_handle(struct iommu_domain *iommu,
1969 struct device *iommu_dev,
1970 unsigned long iova,
1971 int status, void *arg)
1972 {
1973 struct mpp_dev *mpp = (struct mpp_dev *)arg;
1974
1975 dev_err(mpp->dev, "fault addr 0x%08lx status %x\n", iova, status);
1976 mpp_task_dump_hw_reg(mpp);
1977
1978 if (mpp->iommu_info->hdl)
1979 mpp->iommu_info->hdl(iommu, iommu_dev, iova, status, arg);
1980
1981 return 0;
1982 }
1983
1984 /* The device will do more probing work after this */
1985 int mpp_dev_probe(struct mpp_dev *mpp,
1986 struct platform_device *pdev)
1987 {
1988 int ret;
1989 struct resource *res = NULL;
1990 struct device *dev = &pdev->dev;
1991 struct device_node *np = dev->of_node;
1992 struct mpp_hw_info *hw_info = mpp->var->hw_info;
1993
1994 /* Get the disable auto frequency flag from the dtsi */
1995 mpp->auto_freq_en = !device_property_read_bool(dev, "rockchip,disable-auto-freq");
1996 /* read flag for pmu idle request */
1997 mpp->skip_idle = device_property_read_bool(dev, "rockchip,skip-pmu-idle-request");
1998
1999 /* read link table capacity */
2000 ret = of_property_read_u32(np, "rockchip,task-capacity",
2001 &mpp->task_capacity);
2002 if (ret)
2003 mpp->task_capacity = 1;
2004
2005 mpp->dev = dev;
2006 mpp->hw_ops = mpp->var->hw_ops;
2007 mpp->dev_ops = mpp->var->dev_ops;
2008
2009 /* Get and attach to service */
2010 ret = mpp_attach_service(mpp, dev);
2011 if (ret) {
2012 dev_err(dev, "failed to attach service\n");
2013 return -ENODEV;
2014 }
2015
2016 if (mpp->task_capacity == 1) {
2017 /* power domain autosuspend delay 2s */
2018 pm_runtime_set_autosuspend_delay(dev, 2000);
2019 pm_runtime_use_autosuspend(dev);
2020 } else {
2021 dev_info(dev, "link mode task capacity %d\n",
2022 mpp->task_capacity);
2023 /* do not set up autosuspend on a multi-task device */
2024 }
2025
2026 kthread_init_work(&mpp->work, mpp_task_worker_default);
2027
2028 atomic_set(&mpp->reset_request, 0);
2029 atomic_set(&mpp->session_index, 0);
2030 atomic_set(&mpp->task_count, 0);
2031 atomic_set(&mpp->task_index, 0);
2032
2033 device_init_wakeup(dev, true);
2034 pm_runtime_enable(dev);
2035
2036 mpp->irq = platform_get_irq(pdev, 0);
2037 if (mpp->irq < 0) {
2038 dev_err(dev, "No interrupt resource found\n");
2039 ret = -ENODEV;
2040 goto failed;
2041 }
2042
2043 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2044 if (!res) {
2045 dev_err(&pdev->dev, "no memory resource defined\n");
2046 ret = -ENODEV;
2047 goto failed;
2048 }
2049 /*
2050 * Note: devm_ioremap_resource cannot be used here. The reason is
2051 * that hevc and vdpu map the same register address region on rk3368,
2052 * and devm_ioremap_resource calls devm_request_mem_region, which
2053 * would fail on the overlapping region. Using devm_ioremap avoids
2054 * that check.
2055 */
2056 mpp->reg_base = devm_ioremap(dev, res->start, resource_size(res));
2057 if (!mpp->reg_base) {
2058 dev_err(dev, "ioremap failed for resource %pR\n", res);
2059 ret = -ENOMEM;
2060 goto failed;
2061 }
2062
2063 pm_runtime_get_sync(dev);
2064 /*
2065 * TODO: probe the iommu here or in the device-specific code; some
2066 * devices have no iommu, so handling it per device may be better.
2067 */
2068 mpp->iommu_info = mpp_iommu_probe(dev);
2069 if (IS_ERR(mpp->iommu_info)) {
2070 dev_err(dev, "failed to attach iommu\n");
2071 mpp->iommu_info = NULL;
2072 }
2073 if (mpp->hw_ops->init) {
2074 ret = mpp->hw_ops->init(mpp);
2075 if (ret)
2076 goto failed_init;
2077 }
2078 /* set iommu fault handler */
2079 if (mpp->iommu_info)
2080 iommu_set_fault_handler(mpp->iommu_info->domain,
2081 mpp_iommu_handle, mpp);
2082
2083 /* read hardware id */
2084 if (hw_info->reg_id >= 0) {
2085 if (mpp->hw_ops->clk_on)
2086 mpp->hw_ops->clk_on(mpp);
2087
2088 hw_info->hw_id = mpp_read(mpp, hw_info->reg_id);
2089 if (mpp->hw_ops->clk_off)
2090 mpp->hw_ops->clk_off(mpp);
2091 }
2092
2093 pm_runtime_put_sync(dev);
2094
2095 return ret;
2096 failed_init:
2097 pm_runtime_put_sync(dev);
2098 failed:
2099 mpp_detach_workqueue(mpp);
2100 device_init_wakeup(dev, false);
2101 pm_runtime_disable(dev);
2102
2103 return ret;
2104 }
2105
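/*
 * Undo probe: device specific hw exit, iommu teardown, workqueue
 * detach, then wakeup and runtime PM disable.
 */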
2106 int mpp_dev_remove(struct mpp_dev *mpp)
2107 {
2108 if (mpp->hw_ops->exit)
2109 mpp->hw_ops->exit(mpp);
2110
2111 mpp_iommu_remove(mpp->iommu_info);
2112 mpp_detach_workqueue(mpp);
2113 device_init_wakeup(mpp->dev, false);
2114 pm_runtime_disable(mpp->dev);
2115
2116 return 0;
2117 }
2118
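/*
 * On shutdown, flag the service so no new tasks are accepted, then
 * poll task_count until all running tasks drain or the poll times out.
 */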
2119 void mpp_dev_shutdown(struct platform_device *pdev)
2120 {
2121 int ret;
2122 int val;
2123 struct device *dev = &pdev->dev;
2124 struct mpp_dev *mpp = dev_get_drvdata(dev);
2125
2126 dev_info(dev, "shutdown device\n");
2127
2128 atomic_inc(&mpp->srv->shutdown_request);
2129 ret = readx_poll_timeout(atomic_read,
2130 &mpp->task_count,
2131 val, val == 0, 20000, 200000);
2132 if (ret == -ETIMEDOUT)
2133 dev_err(dev, "waiting for %d running tasks timed out\n",
2134 atomic_read(&mpp->task_count));
2135 else
2136 dev_info(dev, "shutdown success\n");
2137 }
2138
2139 int mpp_dev_register_srv(struct mpp_dev *mpp, struct mpp_service *srv)
2140 {
2141 enum MPP_DEVICE_TYPE device_type = mpp->var->device_type;
2142
2143 srv->sub_devices[device_type] = mpp;
2144 set_bit(device_type, &srv->hw_support);
2145
2146 return 0;
2147 }
2148
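/*
 * Hard irq handler: let the device specific irq hook read the status,
 * then mark the current task as handled and cancel its timeout work.
 * A task already claimed by the timeout path is only acknowledged.
 */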
2149 irqreturn_t mpp_dev_irq(int irq, void *param)
2150 {
2151 struct mpp_dev *mpp = param;
2152 struct mpp_task *task = mpp->cur_task;
2153 irqreturn_t irq_ret = IRQ_NONE;
2154
2155 if (mpp->dev_ops->irq)
2156 irq_ret = mpp->dev_ops->irq(mpp);
2157
2158 if (task) {
2159 if (irq_ret != IRQ_NONE) {
2160 /* if the wait or delayed work timed out, the abort request is set;
2161 * the isr should not respond and the timeout is handled in the delayed work
2162 */
2163 if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
2164 mpp_err("error, task has been handled, irq_status %08x\n",
2165 mpp->irq_status);
2166 irq_ret = IRQ_HANDLED;
2167 goto done;
2168 }
2169 cancel_delayed_work(&task->timeout_work);
2170 /* normal condition, set state and wake up isr thread */
2171 set_bit(TASK_STATE_IRQ, &task->state);
2172 }
2173 } else {
2174 mpp_debug(DEBUG_IRQ_CHECK, "error, task is null\n");
2175 }
2176 done:
2177 return irq_ret;
2178 }
2179
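/*
 * Threaded isr: optionally drop the clock rate when nothing is
 * pending, run the device specific isr, then kick the taskqueue to
 * start the next task.
 */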
2180 irqreturn_t mpp_dev_isr_sched(int irq, void *param)
2181 {
2182 irqreturn_t ret = IRQ_NONE;
2183 struct mpp_dev *mpp = param;
2184
2185 if (mpp->auto_freq_en &&
2186 mpp->hw_ops->reduce_freq &&
2187 list_empty(&mpp->queue->pending_list))
2188 mpp->hw_ops->reduce_freq(mpp);
2189
2190 if (mpp->dev_ops->isr)
2191 ret = mpp->dev_ops->isr(mpp);
2192
2193 /* trigger current queue to run next task */
2194 mpp_taskqueue_trigger_work(mpp);
2195
2196 return ret;
2197 }
2198
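/* Read the device GRF status, masked to the valid bits. */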
2199 u32 mpp_get_grf(struct mpp_grf_info *grf_info)
2200 {
2201 u32 val = 0;
2202
2203 if (grf_info && grf_info->grf && grf_info->val)
2204 regmap_read(grf_info->grf, grf_info->offset, &val);
2205
2206 return (val & MPP_GRF_VAL_MASK);
2207 }
2208
2209 bool mpp_grf_is_changed(struct mpp_grf_info *grf_info)
2210 {
2211 bool changed = false;
2212
2213 if (grf_info && grf_info->grf && grf_info->val) {
2214 u32 grf_status = mpp_get_grf(grf_info);
2215 u32 grf_val = grf_info->val & MPP_GRF_VAL_MASK;
2216
2217 changed = (grf_status != grf_val);
2218 }
2219
2220 return changed;
2221 }
2222
2223 int mpp_set_grf(struct mpp_grf_info *grf_info)
2224 {
2225 if (grf_info && grf_info->grf && grf_info->val)
2226 regmap_write(grf_info->grf, grf_info->offset, grf_info->val);
2227
2228 return 0;
2229 }
2230
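/*
 * Timing helpers: record start/partial timestamps on a task and log
 * the elapsed time when the timing debug flags are enabled.
 */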
2231 int mpp_time_record(struct mpp_task *task)
2232 {
2233 if (mpp_debug_unlikely(DEBUG_TIMING) && task) {
2234 task->start = ktime_get();
2235 task->part = task->start;
2236 }
2237
2238 return 0;
2239 }
2240
2241 int mpp_time_part_diff(struct mpp_task *task)
2242 {
2243 ktime_t end;
2244 struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session);
2245
2246 end = ktime_get();
2247 mpp_debug(DEBUG_PART_TIMING, "%s:%d session %d:%d part time: %lld us\n",
2248 dev_name(mpp->dev), task->core_id, task->session->pid,
2249 task->session->index, ktime_us_delta(end, task->part));
2250 task->part = end;
2251
2252 return 0;
2253 }
2254
2255 int mpp_time_diff(struct mpp_task *task)
2256 {
2257 ktime_t end;
2258 struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session);
2259
2260 end = ktime_get();
2261 mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us\n",
2262 dev_name(mpp->dev), task->core_id, task->session->pid,
2263 task->session->index, ktime_us_delta(end, task->start));
2264
2265 return 0;
2266 }
2267
2268 int mpp_write_req(struct mpp_dev *mpp, u32 *regs,
2269 u32 start_idx, u32 end_idx, u32 en_idx)
2270 {
2271 int i;
2272
2273 for (i = start_idx; i < end_idx; i++) {
2274 if (i == en_idx)
2275 continue;
2276 mpp_write_relaxed(mpp, i * sizeof(u32), regs[i]);
2277 }
2278
2279 return 0;
2280 }
2281
2282 int mpp_read_req(struct mpp_dev *mpp, u32 *regs,
2283 u32 start_idx, u32 end_idx)
2284 {
2285 int i;
2286
2287 for (i = start_idx; i < end_idx; i++)
2288 regs[i] = mpp_read_relaxed(mpp, i * sizeof(u32));
2289
2290 return 0;
2291 }
2292
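/*
 * Look up a clock by name in "clock-names" and read its optional
 * normal/advanced rates from the same index of the
 * "rockchip,normal-rates" and "rockchip,advanced-rates" properties.
 */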
2293 int mpp_get_clk_info(struct mpp_dev *mpp,
2294 struct mpp_clk_info *clk_info,
2295 const char *name)
2296 {
2297 int index = of_property_match_string(mpp->dev->of_node,
2298 "clock-names", name);
2299
2300 if (index < 0)
2301 return -EINVAL;
2302
2303 clk_info->clk = devm_clk_get(mpp->dev, name);
2304 of_property_read_u32_index(mpp->dev->of_node,
2305 "rockchip,normal-rates",
2306 index,
2307 &clk_info->normal_rate_hz);
2308 of_property_read_u32_index(mpp->dev->of_node,
2309 "rockchip,advanced-rates",
2310 index,
2311 &clk_info->advanced_rate_hz);
2312
2313 return 0;
2314 }
2315
2316 int mpp_set_clk_info_rate_hz(struct mpp_clk_info *clk_info,
2317 enum MPP_CLOCK_MODE mode,
2318 unsigned long val)
2319 {
2320 if (!clk_info->clk || !val)
2321 return 0;
2322
2323 switch (mode) {
2324 case CLK_MODE_DEBUG:
2325 clk_info->debug_rate_hz = val;
2326 break;
2327 case CLK_MODE_REDUCE:
2328 clk_info->reduce_rate_hz = val;
2329 break;
2330 case CLK_MODE_NORMAL:
2331 clk_info->normal_rate_hz = val;
2332 break;
2333 case CLK_MODE_ADVANCED:
2334 clk_info->advanced_rate_hz = val;
2335 break;
2336 case CLK_MODE_DEFAULT:
2337 clk_info->default_rate_hz = val;
2338 break;
2339 default:
2340 mpp_err("error mode %d\n", mode);
2341 break;
2342 }
2343
2344 return 0;
2345 }
2346
2347 #define MPP_REDUCE_RATE_HZ (50 * MHZ)
2348
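/*
 * Pick the rate for a clock mode: a debug rate overrides everything;
 * reduce falls back to 50 MHz, normal falls back to the default rate,
 * and advanced falls back to normal and then to the default rate.
 */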
2349 unsigned long mpp_get_clk_info_rate_hz(struct mpp_clk_info *clk_info,
2350 enum MPP_CLOCK_MODE mode)
2351 {
2352 unsigned long clk_rate_hz = 0;
2353
2354 if (!clk_info->clk)
2355 return 0;
2356
2357 if (clk_info->debug_rate_hz)
2358 return clk_info->debug_rate_hz;
2359
2360 switch (mode) {
2361 case CLK_MODE_REDUCE: {
2362 if (clk_info->reduce_rate_hz)
2363 clk_rate_hz = clk_info->reduce_rate_hz;
2364 else
2365 clk_rate_hz = MPP_REDUCE_RATE_HZ;
2366 } break;
2367 case CLK_MODE_NORMAL: {
2368 if (clk_info->normal_rate_hz)
2369 clk_rate_hz = clk_info->normal_rate_hz;
2370 else
2371 clk_rate_hz = clk_info->default_rate_hz;
2372 } break;
2373 case CLK_MODE_ADVANCED: {
2374 if (clk_info->advanced_rate_hz)
2375 clk_rate_hz = clk_info->advanced_rate_hz;
2376 else if (clk_info->normal_rate_hz)
2377 clk_rate_hz = clk_info->normal_rate_hz;
2378 else
2379 clk_rate_hz = clk_info->default_rate_hz;
2380 } break;
2381 case CLK_MODE_DEFAULT:
2382 default: {
2383 clk_rate_hz = clk_info->default_rate_hz;
2384 } break;
2385 }
2386
2387 return clk_rate_hz;
2388 }
2389
2390 int mpp_clk_set_rate(struct mpp_clk_info *clk_info,
2391 enum MPP_CLOCK_MODE mode)
2392 {
2393 unsigned long clk_rate_hz;
2394
2395 if (!clk_info->clk)
2396 return -EINVAL;
2397
2398 clk_rate_hz = mpp_get_clk_info_rate_hz(clk_info, mode);
2399 if (clk_rate_hz) {
2400 clk_info->used_rate_hz = clk_rate_hz;
2401 clk_set_rate(clk_info->clk, clk_rate_hz);
2402 }
2403
2404 return 0;
2405 }
2406
2407 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
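/* procfs helpers: expose a u32 through a seq_file that can also be written. */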
2408 static int fops_show_u32(struct seq_file *file, void *v)
2409 {
2410 u32 *val = file->private;
2411
2412 seq_printf(file, "%u\n", *val);
2413
2414 return 0;
2415 }
2416
2417 static int fops_open_u32(struct inode *inode, struct file *file)
2418 {
2419 return single_open(file, fops_show_u32, PDE_DATA(inode));
2420 }
2421
2422 static ssize_t fops_write_u32(struct file *file, const char __user *buf,
2423 size_t count, loff_t *ppos)
2424 {
2425 int rc;
2426 struct seq_file *priv = file->private_data;
2427
2428 rc = kstrtou32_from_user(buf, count, 0, priv->private);
2429 if (rc)
2430 return rc;
2431
2432 return count;
2433 }
2434
2435 static const struct proc_ops procfs_fops_u32 = {
2436 .proc_open = fops_open_u32,
2437 .proc_read = seq_read,
2438 .proc_release = single_release,
2439 .proc_write = fops_write_u32,
2440 };
2441
2442 struct proc_dir_entry *
2443 mpp_procfs_create_u32(const char *name, umode_t mode,
2444 struct proc_dir_entry *parent, void *data)
2445 {
2446 return proc_create_data(name, mode, parent, &procfs_fops_u32, data);
2447 }
2448 #endif
2449